/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *		IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,2005,2006,2007,2008 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * Changelog:
 *	0.01: 05 Oct 2003: First release that compiles without warnings.
 *	0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs.
 *			   Check all PCI BARs for the register window.
 *			   udelay added to mii_rw.
 *	0.03: 06 Oct 2003: Initialize dev->irq.
 *	0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks.
 *	0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout.
 *	0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated,
 *			   irq mask updated
 *	0.07: 14 Oct 2003: Further irq mask updates.
 *	0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill
 *			   added into irq handler, NULL check for drain_ring.
 *	0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the
 *			   requested interrupt sources.
 *	0.10: 20 Oct 2003: First cleanup for release.
 *	0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased.
 *			   MAC Address init fix, set_multicast cleanup.
 *	0.12: 23 Oct 2003: Cleanups for release.
 *	0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10.
 *			   Set link speed correctly. start rx before starting
 *			   tx (nv_start_rx sets the link speed).
 *	0.14: 25 Oct 2003: Nic dependent irq mask.
 *	0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during
 *			   open.
 *	0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size
 *			   increased to 1628 bytes.
 *	0.17: 16 Nov 2003: undo rx buffer size increase. Subtract 1 from
 *			   the tx length.
 *	0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats
 *	0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac
 *			   addresses, really stop rx if already running
 *			   in nv_start_rx, clean up a bit.
 *	0.20: 07 Dec 2003: alloc fixes
 *	0.21: 12 Jan 2004: additional alloc fix, nic polling fix.
 *	0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup
 *			   on close.
 *	0.23: 26 Jan 2004: various small cleanups
 *	0.24: 27 Feb 2004: make driver even less anonymous in backtraces
 *	0.25: 09 Mar 2004: wol support
 *	0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes
 *	0.27: 19 Jun 2004: Gigabit support, new descriptor rings,
 *			   added CK804/MCP04 device IDs, code fixes
 *			   for registers, link status and other minor fixes.
 *	0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe
 *	0.29: 31 Aug 2004: Add backup timer for link change notification.
 *	0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset
 *			   into nv_close, otherwise reenabling for wol can
 *			   cause DMA to kfree'd memory.
 *	0.31: 14 Nov 2004: ethtool support for getting/setting link
 *			   capabilities.
 *	0.32: 16 Apr 2005: RX_ERROR4 handling added.
 *	0.33: 16 May 2005: Support for MCP51 added.
 *	0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
 *	0.35: 26 Jun 2005: Support for MCP55 added.
 *	0.36: 28 Jun 2005: Add jumbo frame support.
 *	0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
 *	0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
 *			   per-packet flags.
 *	0.39: 18 Jul 2005: Add 64bit descriptor support.
 *	0.40: 19 Jul 2005: Add support for mac address change.
 *	0.41: 30 Jul 2005: Write back original MAC in nv_close instead
 *			   of nv_remove
 *	0.42: 06 Aug 2005: Fix lack of link speed initialization
 *			   in the second (and later) nv_open call
 *	0.43: 10 Aug 2005: Add support for tx checksum.
 *	0.44: 20 Aug 2005: Add support for scatter gather and segmentation.
 *	0.45: 18 Sep 2005: Remove nv_stop/start_rx from every link check
 *	0.46: 20 Oct 2005: Add irq optimization modes.
 *	0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
 *	0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
 *	0.49: 10 Dec 2005: Fix tso for large buffers.
 *	0.50: 20 Jan 2006: Add 8021pq tagging support.
 *	0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
 *	0.52: 20 Jan 2006: Add MSI/MSIX support.
 *	0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
 *	0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
 *	0.55: 22 Mar 2006: Add flow control (pause frame).
 *	0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
 *	0.57: 14 May 2006: Mac address set in probe/remove and order corrections.
 *	0.58: 30 Oct 2006: Added support for sideband management unit.
 *	0.59: 30 Oct 2006: Added support for recoverable error.
 *	0.60: 20 Jan 2007: Code optimizations for rings, rx & tx data paths, and stats.
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#ifdef CONFIG_FORCEDETH_NAPI
#define DRIVERNAPI "-NAPI"
#else
#define DRIVERNAPI
#endif
#define FORCEDETH_VERSION		"0.61"
#define DRV_NAME			"forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#if 0
#define dprintk			printk
#else
#define dprintk(x...)		do { } while (0)
#endif

#define TX_WORK_PER_LOOP	64
#define RX_WORK_PER_LOOP	64
/*
 * Hardware access:
 */

#define DEV_NEED_TIMERIRQ	0x0001	/* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER	0x0002	/* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC	0x0004	/* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA	0x0008	/* device supports 64bit dma */
#define DEV_HAS_CHECKSUM	0x0010	/* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN		0x0020	/* device supports vlan tagging and stripping */
#define DEV_HAS_MSI		0x0040	/* device supports MSI */
#define DEV_HAS_MSI_X		0x0080	/* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL	0x0100	/* device supports power savings */
#define DEV_HAS_PAUSEFRAME_TX	0x0200	/* device supports tx pause frames */
#define DEV_HAS_STATISTICS_V1	0x0400	/* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2	0x0800	/* device supports hw statistics version 2 */
#define DEV_HAS_TEST_EXTENDED	0x1000	/* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT	0x2000	/* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR	0x4000	/* device supports correct mac address order */
enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT	0x040
#define NVREG_IRQSTAT_MASK	0x81ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR		0x0001
#define NVREG_IRQ_RX			0x0002
#define NVREG_IRQ_RX_NOBUF		0x0004
#define NVREG_IRQ_TX_ERR		0x0008
#define NVREG_IRQ_TX_OK			0x0010
#define NVREG_IRQ_TIMER			0x0020
#define NVREG_IRQ_LINK			0x0040
#define NVREG_IRQ_RX_FORCED		0x0080
#define NVREG_IRQ_TX_FORCED		0x0100
#define NVREG_IRQ_RECOVER_ERROR		0x8000
#define NVREG_IRQMASK_THROUGHPUT	0x00df
#define NVREG_IRQMASK_CPU		0x0060
#define NVREG_IRQ_TX_ALL		(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL		(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER			(NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)

#define NVREG_IRQ_UNKNOWN	(~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
				   NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
				   NVREG_IRQ_TX_FORCED|NVREG_IRQ_RECOVER_ERROR))

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL		3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT	970 /* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU	13
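/*
 * Worked example: from the formula above, the interval is roughly
 * value * 1024 / 100 microseconds.  NVREG_POLL_DEFAULT_THROUGHPUT = 970
 * therefore gives about 970 * 10.24 us ~= 9.9 ms, i.e. the ~100 timer
 * interrupts per second mentioned in the "Known bugs" comment at the top.
 */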
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX	0x01
#define NVREG_MISC1_HD		0x02
#define NVREG_MISC1_FORCE	0x3b0f3c

	NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT	0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START	0x01
#define NVREG_XMITCTL_MGMT_ST	0x40000000
#define NVREG_XMITCTL_SYNC_MASK		0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY	0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT	0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK	0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE	0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK	0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ	0x0000f000
#define NVREG_XMITCTL_HOST_LOADED	0x00004000
#define NVREG_XMITCTL_TX_PATH_EN	0x01000000
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY	0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX	0x08
#define NVREG_PFF_ALWAYS	0x7F0000
#define NVREG_PFF_PROMISC	0x80
#define NVREG_PFF_MYADDR	0x20
#define NVREG_PFF_LOOPBACK	0x10

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY	0x601
#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START	0x01
#define NVREG_RCVCTL_RX_PATH_EN	0x01000000
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY	0x01

	NvRegRandomSeed = 0x9c,
#define NVREG_RNDSEED_MASK	0x00ff
#define NVREG_RNDSEED_FORCE	0x7f00
#define NVREG_RNDSEED_FORCE2	0x2d00
#define NVREG_RNDSEED_FORCE3	0x7400

	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT	0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100	0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000	0x14050f
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT	0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE	0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
#define NVREG_MCASTMASKA_NONE	0xffffffff
	NvRegMulticastMaskB = 0xBC,
#define NVREG_MCASTMASKB_NONE	0xffff

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII		0x10000000

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
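/*
 * Sketch (not verbatim driver code): the rx and tx ring sizes are packed
 * into NvRegRingSizes as (entries - 1) fields, roughly:
 *
 *	writel(((np->rx_ring_size - 1) << NVREG_RINGSZ_RXSHIFT) +
 *	       ((np->tx_ring_size - 1) << NVREG_RINGSZ_TXSHIFT),
 *	       base + NvRegRingSizes);
 */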
	NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV	0x00008000
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE	0x10000
#define NVREG_LINKSPEED_10	1000
#define NVREG_LINKSPEED_100	100
#define NVREG_LINKSPEED_1000	50
#define NVREG_LINKSPEED_MASK	(0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31	(1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT	0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT	0x1e08000
#define NVREG_TX_WM_DESC2_3_1000	0xfe08000
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK	0x0001
#define NVREG_TXRXCTL_BIT1	0x0002
#define NVREG_TXRXCTL_BIT2	0x0004
#define NVREG_TXRXCTL_IDLE	0x0008
#define NVREG_TXRXCTL_RESET	0x0010
#define NVREG_TXRXCTL_RXCHECK	0x0400
#define NVREG_TXRXCTL_DESC_1	0
#define NVREG_TXRXCTL_DESC_2	0x002100
#define NVREG_TXRXCTL_DESC_3	0xc02200
#define NVREG_TXRXCTL_VLANSTRIP	0x00040
#define NVREG_TXRXCTL_VLANINS	0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE	0x01ff0080
#define NVREG_TX_PAUSEFRAME_ENABLE	0x01800010
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR		0x0001
#define NVREG_MIISTAT_LINKCHANGE	0x0008
#define NVREG_MIISTAT_MASK		0x000f
#define NVREG_MIISTAT_MASK2		0x000f
	NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE		0x0008

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START	0x02
#define NVREG_ADAPTCTL_LINKUP	0x04
#define NVREG_ADAPTCTL_PHYVALID	0x40000
#define NVREG_ADAPTCTL_RUNNING	0x100000
#define NVREG_ADAPTCTL_PHYSHIFT	24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8	(1<<8)
#define NVREG_MIIDELAY		5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE	0x08000
#define NVREG_MIICTL_WRITE	0x00400
#define NVREG_MIICTL_ADDRSHIFT	5
	NvRegMIIData = 0x194,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL		0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
#define NVREG_WAKEUPFLAGS_D3SHIFT	12
#define NVREG_WAKEUPFLAGS_D2SHIFT	8
#define NVREG_WAKEUPFLAGS_D1SHIFT	4
#define NVREG_WAKEUPFLAGS_D0SHIFT	0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
#define NVREG_WAKEUPFLAGS_ENABLE	0x1111

	NvRegPatternCRC = 0x204,
	NvRegPatternMask = 0x208,
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP	(1<<30)
#define NVREG_POWERCAP_D2SUPP	(1<<26)
#define NVREG_POWERCAP_D1SUPP	(1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP	0x8000
#define NVREG_POWERSTATE_VALID		0x0100
#define NVREG_POWERSTATE_MASK		0x0003
#define NVREG_POWERSTATE_D0		0x0000
#define NVREG_POWERSTATE_D1		0x0001
#define NVREG_POWERSTATE_D2		0x0002
#define NVREG_POWERSTATE_D3		0x0003
	NvRegTxCnt = 0x280,
	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxRunt = 0x2b0,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxDef = 0x2d4,
	NvRegTxFrame = 0x2d8,
	NvRegRxCnt = 0x2dc,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE	0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,

	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F11
#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
};
/* Big endian: should work, but is untested */
struct ring_desc {
	__le32 buf;
	__le32 flaglen;
};

struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

union ring_type {
	struct ring_desc* orig;
	struct ring_desc_ex* ex;
};

#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
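/*
 * Example: a v1 descriptor packs the buffer length into the low 16 bits
 * of flaglen (LEN_MASK_V1 == 0x0000ffff) and the status flags into the
 * high 16 bits, so a 1514-byte frame with NV_TX_VALID set reads
 * 0x800005ea; nv_descr_getlength() below recovers the length by masking
 * with LEN_MASK_V1/LEN_MASK_V2.
 */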
#define NV_TX_LASTPACKET	(1<<16)
#define NV_TX_RETRYERROR	(1<<19)
#define NV_TX_FORCED_INTERRUPT	(1<<24)
#define NV_TX_DEFERRED		(1<<26)
#define NV_TX_CARRIERLOST	(1<<27)
#define NV_TX_LATECOLLISION	(1<<28)
#define NV_TX_UNDERFLOW		(1<<29)
#define NV_TX_ERROR		(1<<30)
#define NV_TX_VALID		(1<<31)

#define NV_TX2_LASTPACKET	(1<<29)
#define NV_TX2_RETRYERROR	(1<<18)
#define NV_TX2_FORCED_INTERRUPT	(1<<30)
#define NV_TX2_DEFERRED		(1<<25)
#define NV_TX2_CARRIERLOST	(1<<26)
#define NV_TX2_LATECOLLISION	(1<<27)
#define NV_TX2_UNDERFLOW	(1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR		(1<<30)
#define NV_TX2_VALID		(1<<31)
#define NV_TX2_TSO		(1<<28)
#define NV_TX2_TSO_SHIFT	14
#define NV_TX2_TSO_MAX_SHIFT	14
#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
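/*
 * Example: NV_TX2_TSO_MAX_SIZE is 1 << 14 = 16384, so a single descriptor
 * carries at most 16 KB.  nv_start_xmit() at the end of this file counts
 * descriptors per buffer as (size >> NV_TX2_TSO_MAX_SHIFT) plus one more
 * if a remainder is left, e.g. a 20000-byte linear area needs 2.
 */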
#define NV_TX2_CHECKSUM_L3	(1<<27)
#define NV_TX2_CHECKSUM_L4	(1<<26)

#define NV_TX3_VLAN_TAG_PRESENT	(1<<18)

#define NV_RX_DESCRIPTORVALID	(1<<16)
#define NV_RX_MISSEDFRAME	(1<<17)
#define NV_RX_SUBSTRACT1	(1<<18)
#define NV_RX_ERROR1		(1<<23)
#define NV_RX_ERROR2		(1<<24)
#define NV_RX_ERROR3		(1<<25)
#define NV_RX_ERROR4		(1<<26)
#define NV_RX_CRCERR		(1<<27)
#define NV_RX_OVERFLOW		(1<<28)
#define NV_RX_FRAMINGERR	(1<<29)
#define NV_RX_ERROR		(1<<30)
#define NV_RX_AVAIL		(1<<31)

#define NV_RX2_CHECKSUMMASK	(0x1C000000)
#define NV_RX2_CHECKSUM_IP	(0x10000000)
#define NV_RX2_CHECKSUM_IP_TCP	(0x14000000)
#define NV_RX2_CHECKSUM_IP_UDP	(0x18000000)
#define NV_RX2_DESCRIPTORVALID	(1<<29)
#define NV_RX2_SUBSTRACT1	(1<<25)
#define NV_RX2_ERROR1		(1<<18)
#define NV_RX2_ERROR2		(1<<19)
#define NV_RX2_ERROR3		(1<<20)
#define NV_RX2_ERROR4		(1<<21)
#define NV_RX2_CRCERR		(1<<22)
#define NV_RX2_OVERFLOW		(1<<23)
#define NV_RX2_FRAMINGERR	(1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR		(1<<30)
#define NV_RX2_AVAIL		(1<<31)

#define NV_RX3_VLAN_TAG_PRESENT	(1<<16)
#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)
/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1	0x270
#define NV_PCI_REGSZ_VER2	0x2d4
#define NV_PCI_REGSZ_VER3	0x604
/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY	4
#define NV_TXSTOP_DELAY1	10
#define NV_TXSTOP_DELAY1MAX	500000
#define NV_TXSTOP_DELAY2	100
#define NV_RXSTOP_DELAY1	10
#define NV_RXSTOP_DELAY1MAX	500000
#define NV_RXSTOP_DELAY2	100
#define NV_SETUP5_DELAY		5
#define NV_SETUP5_DELAYMAX	50000
#define NV_POWERUP_DELAY	5
#define NV_POWERUP_DELAYMAX	5000
#define NV_MIIBUSY_DELAY	50
#define NV_MIIPHY_DELAY		10
#define NV_MIIPHY_DELAYMAX	10000
#define NV_MAC_RESET_DELAY	64

#define NV_WAKEUPPATTERNS	5
#define NV_WAKEUPMASKENTRIES	4

/* General driver defaults */
#define NV_WATCHDOG_TIMEO	(5*HZ)

#define RX_RING_DEFAULT		128
#define TX_RING_DEFAULT		256
#define RX_RING_MIN		128
#define TX_RING_MIN		64
#define RING_MAX_DESC_VER_1	1024
#define RING_MAX_DESC_VER_2_3	16384

/* rx/tx mac addr + type + vlan + align + slack*/
#define NV_RX_HEADERS		(64)
/* even more slack. */
#define NV_RX_ALLOC_PAD		(64)

/* maximum mtu size */
#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
#define NV_PKTLIMIT_2	9100	/* Actual limit according to NVidia: 9202 */

#define OOM_REFILL	(1+HZ/20)
#define POLL_WAIT	(1+HZ/100)
#define LINK_TIMEOUT	(3*HZ)
#define STATS_INTERVAL	(10*HZ)
/*
 * desc_ver values:
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1	1
#define DESC_VER_2	2
#define DESC_VER_3	3
/* PHY defines */
#define PHY_OUI_MARVELL	0x5043
#define PHY_OUI_CICADA	0x03f1
#define PHY_OUI_VITESSE	0x01c1
#define PHY_OUI_REALTEK	0x0732
#define PHYID1_OUI_MASK	0x03ff
#define PHYID1_OUI_SHFT	6
#define PHYID2_OUI_MASK	0xfc00
#define PHYID2_OUI_SHFT	10
#define PHYID2_MODEL_MASK		0x03f0
#define PHY_MODEL_MARVELL_E3016		0x220
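/*
 * Sketch (assumption; the probe code that does this is outside this
 * excerpt): the PHY OUI and model are assembled from the two MII PHYID
 * registers roughly like this:
 *
 *	phy_oui = ((phyid1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT) |
 *		  ((phyid2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT);
 *	phy_model = phyid2 & PHYID2_MODEL_MASK;
 */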
#define PHY_MARVELL_E3016_INITMASK	0x0300
#define PHY_CICADA_INIT1	0x0f000
#define PHY_CICADA_INIT2	0x0e00
#define PHY_CICADA_INIT3	0x01000
#define PHY_CICADA_INIT4	0x0200
#define PHY_CICADA_INIT5	0x0004
#define PHY_CICADA_INIT6	0x02000
#define PHY_VITESSE_INIT_REG1	0x1f
#define PHY_VITESSE_INIT_REG2	0x10
#define PHY_VITESSE_INIT_REG3	0x11
#define PHY_VITESSE_INIT_REG4	0x12
#define PHY_VITESSE_INIT_MSK1	0xc
#define PHY_VITESSE_INIT_MSK2	0x0180
#define PHY_VITESSE_INIT1	0x52b5
#define PHY_VITESSE_INIT2	0xaf8a
#define PHY_VITESSE_INIT3	0x8
#define PHY_VITESSE_INIT4	0x8f8a
#define PHY_VITESSE_INIT5	0xaf86
#define PHY_VITESSE_INIT6	0x8f86
#define PHY_VITESSE_INIT7	0xaf82
#define PHY_VITESSE_INIT8	0x0100
#define PHY_VITESSE_INIT9	0x8f82
#define PHY_VITESSE_INIT10	0x0
#define PHY_REALTEK_INIT_REG1	0x1f
#define PHY_REALTEK_INIT_REG2	0x19
#define PHY_REALTEK_INIT_REG3	0x13
#define PHY_REALTEK_INIT1	0x0000
#define PHY_REALTEK_INIT2	0x8e00
#define PHY_REALTEK_INIT3	0x0001
#define PHY_REALTEK_INIT4	0xad17

#define PHY_GIGABIT	0x0100

#define PHY_TIMEOUT	0x1
#define PHY_ERROR	0x2

#define PHY_100		0x1
#define PHY_1000	0x2
#define PHY_HALF	0x100

#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE  0x0004
#define NV_PAUSEFRAME_TX_ENABLE  0x0008
#define NV_PAUSEFRAME_RX_REQ     0x0010
#define NV_PAUSEFRAME_TX_REQ     0x0020
#define NV_PAUSEFRAME_AUTONEG    0x0040
/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS	8
#define NV_MSI_X_VECTORS_MASK	0x000f
#define NV_MSI_CAPABLE		0x0010
#define NV_MSI_X_CAPABLE	0x0020
#define NV_MSI_ENABLED		0x0040
#define NV_MSI_X_ENABLED	0x0080

#define NV_MSI_X_VECTOR_ALL	0x0
#define NV_MSI_X_VECTOR_RX	0x0
#define NV_MSI_X_VECTOR_TX	0x1
#define NV_MSI_X_VECTOR_OTHER	0x2
/* statistics */
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};

static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_bytes" },
	{ "tx_zero_rexmt" },
	{ "tx_one_rexmt" },
	{ "tx_many_rexmt" },
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "rx_frame_error" },
	{ "rx_extra_byte" },
	{ "rx_late_collision" },
	{ "rx_runt" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_crc_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_unicast" },
	{ "rx_multicast" },
	{ "rx_broadcast" },
	{ "rx_packets" },
	{ "rx_errors_total" },
	{ "tx_errors_total" },

	/* version 2 stats */
	{ "tx_deferral" },
	{ "tx_packets" },
	{ "rx_bytes" },
	{ "tx_pause" },
	{ "rx_pause" },
	{ "rx_drop_frame" }
};
struct nv_ethtool_stats {
	u64 tx_bytes;
	u64 tx_zero_rexmt;
	u64 tx_one_rexmt;
	u64 tx_many_rexmt;
	u64 tx_late_collision;
	u64 tx_fifo_errors;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 tx_retry_error;
	u64 rx_frame_error;
	u64 rx_extra_byte;
	u64 rx_late_collision;
	u64 rx_runt;
	u64 rx_frame_too_long;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_frame_align_error;
	u64 rx_length_error;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_packets;
	u64 rx_errors_total;
	u64 tx_errors_total;

	/* version 2 stats */
	u64 tx_deferral;
	u64 tx_packets;
	u64 rx_bytes;
	u64 tx_pause;
	u64 rx_pause;
	u64 rx_drop_frame;
};

#define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
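/*
 * Worked example: struct nv_ethtool_stats holds 30 u64 counters, so
 * NV_DEV_STATISTICS_V2_COUNT evaluates to 30 and
 * NV_DEV_STATISTICS_V1_COUNT to 30 - 6 = 24, matching the six
 * "version 2 stats" fields at the end of the struct.
 */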
/* diagnostics */
#define NV_TEST_COUNT_BASE 3
#define NV_TEST_COUNT_EXTENDED 4

static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link      (online/offline)" },
	{ "register  (offline)       " },
	{ "interrupt (offline)       " },
	{ "loopback  (offline)       " }
};

struct register_test {
	__u32 reg;
	__u32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	{ 0,0 }
};
struct nv_skb_map {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int dma_len;
};
/*
 * SMP locking:
 * All hardware access under dev->priv->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *	by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *	needs dev->priv->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 */
/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	struct net_device *dev;
	struct napi_struct napi;

	/* General data:
	 * Locking: spin_lock(&np->lock); */
	struct nv_ethtool_stats estats;
	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int autoneg;
	int fixed_mode;
	int phyaddr;
	int wolenabled;
	unsigned int phy_oui;
	unsigned int phy_model;
	u16 gigabit;
	int intr_test;
	int recover_error;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 orig_mac[2];
	u32 irqmask;
	u32 desc_ver;
	u32 txrxctl_bits;
	u32 vlanctl_bits;
	u32 driver_data;
	u32 register_size;
	int rx_csum;
	u32 mac_in_use;

	void __iomem *base;

	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type get_rx, put_rx, first_rx, last_rx;
	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
	struct nv_skb_map *rx_skb;

	union ring_type rx_ring;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;
	u32 nic_poll_irq;
	int rx_ring_size;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	int need_linktimer;
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	union ring_type get_tx, put_tx, first_tx, last_tx;
	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
	struct nv_skb_map *tx_skb;

	union ring_type tx_ring;
	u32 tx_flags;
	int tx_ring_size;
	int tx_stop;

	/* vlan fields */
	struct vlan_group *vlangrp;

	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* flow control */
	u32 pause_flags;
};
/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 5;
/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU
};
static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * This value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;
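/*
 * Worked example: for a 1 ms interval, time_in_micro_secs = 1000, so
 * poll_interval = (1000 * 100) / 1024 ~= 97, which matches the
 * NVREG_POLL_DEFAULT=97 note in the register definitions above.
 */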
/*
 * MSI interrupts
 */
enum {
	NV_MSI_INT_DISABLED,
	NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;

/*
 * MSIX interrupts
 */
enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_DISABLED;

/*
 * DMA 64bit
 */
enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;
static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}
static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}
static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
				int delay, int delaymax, const char *msg)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0) {
			if (msg)
				printk(msg);
			return 1;
		}
	} while ((readl(base + offset) & mask) != target);
	return 0;
}
#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02

static inline u32 dma_low(dma_addr_t addr)
{
	return addr;
}

static inline u32 dma_high(dma_addr_t addr)
{
	return addr>>31>>1;	/* 0 if 32bit, shift down by 32 if 64bit */
}
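/*
 * Example: on a platform with a 64-bit dma_addr_t, addr = 0x123456789a
 * splits into dma_low() = 0x3456789a and dma_high() = 0x12.  The double
 * shift (>>31>>1) evaluates to 0 on a 32-bit dma_addr_t, whereas a
 * plain ">> 32" on a 32-bit type would be undefined behaviour.
 */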
static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		}
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
		}
	}
}
static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
	if (np->rx_skb)
		kfree(np->rx_skb);
	if (np->tx_skb)
		kfree(np->tx_skb);
}
static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}
static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}
/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}
#define MII_READ	(-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
				dev->name, value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
				dev->name, miireg, addr, retval);
	}

	return retval;
}
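/*
 * Usage sketch: reading a PHY register looks like
 *
 *	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
 *
 * while passing any value other than MII_READ performs a write, as the
 * phy_reset() and phy_init() callers below demonstrate.
 */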
static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = BMCR_RESET | bmcr_setup;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
		return -1;
	}

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		msleep(10);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}
static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;

	/* phy errata for E3016 phy */
	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		reg &= ~PHY_MARVELL_E3016_INITMASK;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
			printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	else
		np->gigabit = 0;

	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= BMCR_ANENABLE;

	/* reset the phy
	 * (certain phys need bmcr to be setup with reset)
	 */
	if (phy_reset(dev, mii_control)) {
		printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* phy vendor specific configuration */
	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII)) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_CICADA_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_CICADA) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
		phy_reserved |= PHY_CICADA_INIT6;
		if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_VITESSE) {
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
		phy_reserved |= PHY_VITESSE_INIT8;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		/* reset could have cleared these out, set them back */
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);

	/* restart auto negotiation */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
		return PHY_ERROR;
	}

	return 0;
}
static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
	/* Already running? Stop it. */
	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
		rx_ctrl &= ~NVREG_RCVCTL_START;
		writel(rx_ctrl, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	rx_ctrl |= NVREG_RCVCTL_START;
	if (np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
		dev->name, np->duplex, np->linkspeed);
	pci_push(base);
}
static void nv_stop_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
	if (!np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_START;
	else
		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
			NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");

	udelay(NV_RXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(0, base + NvRegLinkSpeed);
}
static void nv_start_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
	tx_ctrl |= NVREG_XMITCTL_START;
	if (np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	pci_push(base);
}
static void nv_stop_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
	if (!np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_START;
	else
		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
			NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");

	udelay(NV_TXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
		       base + NvRegTransmitPoll);
}
static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}
static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}
static void nv_get_hw_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->estats.tx_bytes += readl(base + NvRegTxCnt);
	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
	np->estats.rx_runt += readl(base + NvRegRxRunt);
	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
	np->estats.rx_packets =
		np->estats.rx_unicast +
		np->estats.rx_multicast +
		np->estats.rx_broadcast;
	np->estats.rx_errors_total =
		np->estats.rx_crc_errors +
		np->estats.rx_over_errors +
		np->estats.rx_frame_error +
		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
		np->estats.rx_late_collision +
		np->estats.rx_runt +
		np->estats.rx_frame_too_long;
	np->estats.tx_errors_total =
		np->estats.tx_late_collision +
		np->estats.tx_fifo_errors +
		np->estats.tx_carrier_errors +
		np->estats.tx_excess_deferral +
		np->estats.tx_retry_error;

	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
		np->estats.tx_deferral += readl(base + NvRegTxDef);
		np->estats.tx_packets += readl(base + NvRegTxFrame);
		np->estats.rx_bytes += readl(base + NvRegRxCnt);
		np->estats.tx_pause += readl(base + NvRegTxPause);
		np->estats.rx_pause += readl(base + NvRegRxPause);
		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
	}
}
/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	/* If the nic supports hw counters then retrieve latest values */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) {
		nv_get_hw_stats(dev);

		/* copy to net_device stats */
		dev->stats.tx_bytes = np->estats.tx_bytes;
		dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
		dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
		dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
		dev->stats.rx_over_errors = np->estats.rx_over_errors;
		dev->stats.rx_errors = np->estats.rx_errors_total;
		dev->stats.tx_errors = np->estats.tx_errors_total;
	}

	return &dev->stats;
}
/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without available descriptors.
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc* less_rx;

	less_rx = np->get_rx.orig;
	if (less_rx-- == np->first_rx.orig)
		less_rx = np->last_rx.orig;
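	/*
	 * less_rx now points one descriptor before get_rx (with wraparound),
	 * so the loop below stops one slot short of the get pointer: the put
	 * pointer can never catch up with it and make a full ring look empty.
	 */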
	while (np->put_rx.orig != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
			wmb();
			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
				np->put_rx.orig = np->first_rx.orig;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}
static int nv_alloc_rx_optimized(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc_ex* less_rx;

	less_rx = np->get_rx.ex;
	if (less_rx-- == np->first_rx.ex)
		less_rx = np->last_rx.ex;

	while (np->put_rx.ex != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
			np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
			wmb();
			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
				np->put_rx.ex = np->first_rx.ex;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}
/* If rx bufs are exhausted, this is called after 50ms to attempt a refresh */
#ifdef CONFIG_FORCEDETH_NAPI
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	/* Just reschedule NAPI rx processing */
	netif_rx_schedule(dev, &np->napi);
}
#else
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	int retcode;

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		retcode = nv_alloc_rx(dev);
	else
		retcode = nv_alloc_rx_optimized(dev);
	if (retcode) {
		spin_lock_irq(&np->lock);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock_irq(&np->lock);
	}
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
}
#endif
static void nv_init_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
	else
		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
	np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];

	for (i = 0; i < np->rx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		np->rx_skb[i].skb = NULL;
		np->rx_skb[i].dma = 0;
	}
}
static void nv_init_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
	else
		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];

	for (i = 0; i < np->tx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		np->tx_skb[i].skb = NULL;
		np->tx_skb[i].dma = 0;
	}
}
1701 static int nv_init_ring(struct net_device *dev)
1703 struct fe_priv *np = netdev_priv(dev);
1705 nv_init_tx(dev);
1706 nv_init_rx(dev);
1707 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1708 return nv_alloc_rx(dev);
1709 else
1710 return nv_alloc_rx_optimized(dev);
1713 static int nv_release_txskb(struct net_device *dev, struct nv_skb_map* tx_skb)
1715 struct fe_priv *np = netdev_priv(dev);
1717 if (tx_skb->dma) {
1718 pci_unmap_page(np->pci_dev, tx_skb->dma,
1719 tx_skb->dma_len,
1720 PCI_DMA_TODEVICE);
1721 tx_skb->dma = 0;
if (tx_skb->skb) {
dev_kfree_skb_any(tx_skb->skb);
tx_skb->skb = NULL;
return 1;
} else {
return 0;
}
}
1732 static void nv_drain_tx(struct net_device *dev)
1734 struct fe_priv *np = netdev_priv(dev);
1735 unsigned int i;
1737 for (i = 0; i < np->tx_ring_size; i++) {
1738 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1739 np->tx_ring.orig[i].flaglen = 0;
1740 np->tx_ring.orig[i].buf = 0;
1741 } else {
1742 np->tx_ring.ex[i].flaglen = 0;
1743 np->tx_ring.ex[i].txvlan = 0;
1744 np->tx_ring.ex[i].bufhigh = 0;
1745 np->tx_ring.ex[i].buflow = 0;
1747 if (nv_release_txskb(dev, &np->tx_skb[i]))
1748 dev->stats.tx_dropped++;
1752 static void nv_drain_rx(struct net_device *dev)
1754 struct fe_priv *np = netdev_priv(dev);
1755 int i;
1757 for (i = 0; i < np->rx_ring_size; i++) {
1758 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1759 np->rx_ring.orig[i].flaglen = 0;
1760 np->rx_ring.orig[i].buf = 0;
1761 } else {
1762 np->rx_ring.ex[i].flaglen = 0;
1763 np->rx_ring.ex[i].txvlan = 0;
1764 np->rx_ring.ex[i].bufhigh = 0;
1765 np->rx_ring.ex[i].buflow = 0;
1767 wmb();
1768 if (np->rx_skb[i].skb) {
1769 pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
1770 (skb_end_pointer(np->rx_skb[i].skb) -
1771 np->rx_skb[i].skb->data),
1772 PCI_DMA_FROMDEVICE);
1773 dev_kfree_skb(np->rx_skb[i].skb);
1774 np->rx_skb[i].skb = NULL;
1779 static void drain_ring(struct net_device *dev)
1781 nv_drain_tx(dev);
1782 nv_drain_rx(dev);
1785 static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
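/* Ring occupancy is (ring_size + put - get) % ring_size; e.g. with a
 * tx_ring_size of 256 and put_tx_ctx 10 entries ahead of get_tx_ctx,
 * (256 + 10) % 256 = 10 descriptors are in flight and 256 - 10 = 246
 * slots are empty (illustrative numbers, not this driver's defaults). */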
1787 return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
/*
 * nv_start_xmit: dev->hard_start_xmit function
 * Called with netif_tx_lock held.
 */
1794 static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1796 struct fe_priv *np = netdev_priv(dev);
1797 u32 tx_flags = 0;
1798 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
1799 unsigned int fragments = skb_shinfo(skb)->nr_frags;
1800 unsigned int i;
1801 u32 offset = 0;
1802 u32 bcnt;
1803 u32 size = skb->len-skb->data_len;
1804 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
1805 u32 empty_slots;
1806 struct ring_desc* put_tx;
1807 struct ring_desc* start_tx;
1808 struct ring_desc* prev_tx;
1809 struct nv_skb_map* prev_tx_ctx;
1811 /* add fragments to entries count */
1812 for (i = 0; i < fragments; i++) {
1813 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
1814 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
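/* each area is chunked into NV_TX2_TSO_MAX_SIZE pieces; assuming
 * NV_TX2_TSO_MAX_SIZE is 1 << 14 = 16384, a 20000 byte linear area
 * costs (20000 >> 14) + 1 = 2 descriptor entries */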
1817 empty_slots = nv_get_empty_tx_slots(np);
1818 if (unlikely(empty_slots <= entries)) {
1819 spin_lock_irq(&np->lock);
1820 netif_stop_queue(dev);
1821 np->tx_stop = 1;
1822 spin_unlock_irq(&np->lock);
1823 return NETDEV_TX_BUSY;
1826 start_tx = put_tx = np->put_tx.orig;
1828 /* setup the header buffer */
1829 do {
1830 prev_tx = put_tx;
1831 prev_tx_ctx = np->put_tx_ctx;
1832 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1833 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
1834 PCI_DMA_TODEVICE);
1835 np->put_tx_ctx->dma_len = bcnt;
1836 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
1837 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1839 tx_flags = np->tx_flags;
1840 offset += bcnt;
1841 size -= bcnt;
1842 if (unlikely(put_tx++ == np->last_tx.orig))
1843 put_tx = np->first_tx.orig;
1844 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
1845 np->put_tx_ctx = np->first_tx_ctx;
1846 } while (size);
1848 /* setup the fragments */
1849 for (i = 0; i < fragments; i++) {
1850 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1851 u32 size = frag->size;
1852 offset = 0;
1854 do {
1855 prev_tx = put_tx;
1856 prev_tx_ctx = np->put_tx_ctx;
1857 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1858 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
1859 PCI_DMA_TODEVICE);
1860 np->put_tx_ctx->dma_len = bcnt;
1861 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
1862 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1864 offset += bcnt;
1865 size -= bcnt;
1866 if (unlikely(put_tx++ == np->last_tx.orig))
1867 put_tx = np->first_tx.orig;
1868 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
1869 np->put_tx_ctx = np->first_tx_ctx;
1870 } while (size);
1873 /* set last fragment flag */
1874 prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
1876 /* save skb in this slot's context area */
1877 prev_tx_ctx->skb = skb;
1879 if (skb_is_gso(skb))
1880 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
1881 else
1882 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
1883 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
1885 spin_lock_irq(&np->lock);
1887 /* set tx flags */
1888 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
1889 np->put_tx.orig = put_tx;
1891 spin_unlock_irq(&np->lock);
1893 dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
1894 dev->name, entries, tx_flags_extra);
1896 int j;
1897 for (j=0; j<64; j++) {
1898 if ((j%16) == 0)
1899 dprintk("\n%03x:", j);
1900 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
1902 dprintk("\n");
1905 dev->trans_start = jiffies;
1906 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
1907 return NETDEV_TX_OK;
1910 static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
1912 struct fe_priv *np = netdev_priv(dev);
1913 u32 tx_flags = 0;
1914 u32 tx_flags_extra;
1915 unsigned int fragments = skb_shinfo(skb)->nr_frags;
1916 unsigned int i;
1917 u32 offset = 0;
1918 u32 bcnt;
1919 u32 size = skb->len-skb->data_len;
1920 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
1921 u32 empty_slots;
1922 struct ring_desc_ex* put_tx;
1923 struct ring_desc_ex* start_tx;
1924 struct ring_desc_ex* prev_tx;
1925 struct nv_skb_map* prev_tx_ctx;
1927 /* add fragments to entries count */
1928 for (i = 0; i < fragments; i++) {
1929 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
1930 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
1933 empty_slots = nv_get_empty_tx_slots(np);
1934 if (unlikely(empty_slots <= entries)) {
1935 spin_lock_irq(&np->lock);
1936 netif_stop_queue(dev);
1937 np->tx_stop = 1;
1938 spin_unlock_irq(&np->lock);
1939 return NETDEV_TX_BUSY;
1942 start_tx = put_tx = np->put_tx.ex;
1944 /* setup the header buffer */
1945 do {
1946 prev_tx = put_tx;
1947 prev_tx_ctx = np->put_tx_ctx;
1948 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1949 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
1950 PCI_DMA_TODEVICE);
1951 np->put_tx_ctx->dma_len = bcnt;
1952 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
1953 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
1954 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1956 tx_flags = NV_TX2_VALID;
1957 offset += bcnt;
1958 size -= bcnt;
1959 if (unlikely(put_tx++ == np->last_tx.ex))
1960 put_tx = np->first_tx.ex;
1961 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
1962 np->put_tx_ctx = np->first_tx_ctx;
1963 } while (size);
1965 /* setup the fragments */
1966 for (i = 0; i < fragments; i++) {
1967 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1968 u32 size = frag->size;
1969 offset = 0;
1971 do {
1972 prev_tx = put_tx;
1973 prev_tx_ctx = np->put_tx_ctx;
1974 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1975 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
1976 PCI_DMA_TODEVICE);
1977 np->put_tx_ctx->dma_len = bcnt;
1978 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
1979 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
1980 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1982 offset += bcnt;
1983 size -= bcnt;
1984 if (unlikely(put_tx++ == np->last_tx.ex))
1985 put_tx = np->first_tx.ex;
1986 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
1987 np->put_tx_ctx = np->first_tx_ctx;
1988 } while (size);
1991 /* set last fragment flag */
1992 prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);
1994 /* save skb in this slot's context area */
1995 prev_tx_ctx->skb = skb;
1997 if (skb_is_gso(skb))
1998 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
1999 else
2000 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
2001 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
2003 /* vlan tag */
2004 if (likely(!np->vlangrp)) {
2005 start_tx->txvlan = 0;
2006 } else {
2007 if (vlan_tx_tag_present(skb))
2008 start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb));
2009 else
2010 start_tx->txvlan = 0;
2013 spin_lock_irq(&np->lock);
2015 /* set tx flags */
2016 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
2017 np->put_tx.ex = put_tx;
2019 spin_unlock_irq(&np->lock);
2021 dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
2022 dev->name, entries, tx_flags_extra);
2024 int j;
2025 for (j=0; j<64; j++) {
2026 if ((j%16) == 0)
2027 dprintk("\n%03x:", j);
2028 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2030 dprintk("\n");
2033 dev->trans_start = jiffies;
2034 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2035 return NETDEV_TX_OK;
/*
 * nv_tx_done: check for completed packets, release the skbs.
 *
 * Caller must own np->lock.
 */
2043 static void nv_tx_done(struct net_device *dev)
2045 struct fe_priv *np = netdev_priv(dev);
2046 u32 flags;
2047 struct ring_desc* orig_get_tx = np->get_tx.orig;
2049 while ((np->get_tx.orig != np->put_tx.orig) &&
2050 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID)) {
2052 dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
2053 dev->name, flags);
2055 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
2056 np->get_tx_ctx->dma_len,
2057 PCI_DMA_TODEVICE);
2058 np->get_tx_ctx->dma = 0;
2060 if (np->desc_ver == DESC_VER_1) {
2061 if (flags & NV_TX_LASTPACKET) {
2062 if (flags & NV_TX_ERROR) {
2063 if (flags & NV_TX_UNDERFLOW)
2064 dev->stats.tx_fifo_errors++;
2065 if (flags & NV_TX_CARRIERLOST)
2066 dev->stats.tx_carrier_errors++;
2067 dev->stats.tx_errors++;
2068 } else {
2069 dev->stats.tx_packets++;
2070 dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
2072 dev_kfree_skb_any(np->get_tx_ctx->skb);
2073 np->get_tx_ctx->skb = NULL;
2075 } else {
2076 if (flags & NV_TX2_LASTPACKET) {
2077 if (flags & NV_TX2_ERROR) {
2078 if (flags & NV_TX2_UNDERFLOW)
2079 dev->stats.tx_fifo_errors++;
2080 if (flags & NV_TX2_CARRIERLOST)
2081 dev->stats.tx_carrier_errors++;
2082 dev->stats.tx_errors++;
2083 } else {
2084 dev->stats.tx_packets++;
2085 dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
2087 dev_kfree_skb_any(np->get_tx_ctx->skb);
2088 np->get_tx_ctx->skb = NULL;
2091 if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
2092 np->get_tx.orig = np->first_tx.orig;
2093 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2094 np->get_tx_ctx = np->first_tx_ctx;
2096 if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
2097 np->tx_stop = 0;
2098 netif_wake_queue(dev);
2102 static void nv_tx_done_optimized(struct net_device *dev, int limit)
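/* unlike nv_tx_done, this variant takes a limit so that a burst of
 * completions cannot keep the caller looping in irq context */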
2104 struct fe_priv *np = netdev_priv(dev);
2105 u32 flags;
2106 struct ring_desc_ex* orig_get_tx = np->get_tx.ex;
2108 while ((np->get_tx.ex != np->put_tx.ex) &&
2109 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) &&
2110 (limit-- > 0)) {
2112 dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
2113 dev->name, flags);
2115 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
2116 np->get_tx_ctx->dma_len,
2117 PCI_DMA_TODEVICE);
2118 np->get_tx_ctx->dma = 0;
2120 if (flags & NV_TX2_LASTPACKET) {
2121 if (!(flags & NV_TX2_ERROR))
2122 dev->stats.tx_packets++;
2123 dev_kfree_skb_any(np->get_tx_ctx->skb);
2124 np->get_tx_ctx->skb = NULL;
2126 if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
2127 np->get_tx.ex = np->first_tx.ex;
2128 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2129 np->get_tx_ctx = np->first_tx_ctx;
2131 if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
2132 np->tx_stop = 0;
2133 netif_wake_queue(dev);
/*
 * nv_tx_timeout: dev->tx_timeout function
 * Called with netif_tx_lock held.
 */
2141 static void nv_tx_timeout(struct net_device *dev)
2143 struct fe_priv *np = netdev_priv(dev);
2144 u8 __iomem *base = get_hwbase(dev);
2145 u32 status;
2147 if (np->msi_flags & NV_MSI_X_ENABLED)
2148 status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2149 else
2150 status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
2152 printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);
2155 int i;
2157 printk(KERN_INFO "%s: Ring at %lx\n",
2158 dev->name, (unsigned long)np->ring_addr);
2159 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
2160 for (i=0;i<=np->register_size;i+= 32) {
2161 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
2163 readl(base + i + 0), readl(base + i + 4),
2164 readl(base + i + 8), readl(base + i + 12),
2165 readl(base + i + 16), readl(base + i + 20),
2166 readl(base + i + 24), readl(base + i + 28));
2168 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
2169 for (i=0;i<np->tx_ring_size;i+= 4) {
2170 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
2171 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
2173 le32_to_cpu(np->tx_ring.orig[i].buf),
2174 le32_to_cpu(np->tx_ring.orig[i].flaglen),
2175 le32_to_cpu(np->tx_ring.orig[i+1].buf),
2176 le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
2177 le32_to_cpu(np->tx_ring.orig[i+2].buf),
2178 le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
2179 le32_to_cpu(np->tx_ring.orig[i+3].buf),
2180 le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
2181 } else {
2182 printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
2184 le32_to_cpu(np->tx_ring.ex[i].bufhigh),
2185 le32_to_cpu(np->tx_ring.ex[i].buflow),
2186 le32_to_cpu(np->tx_ring.ex[i].flaglen),
2187 le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
2188 le32_to_cpu(np->tx_ring.ex[i+1].buflow),
2189 le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
2190 le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
2191 le32_to_cpu(np->tx_ring.ex[i+2].buflow),
2192 le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
2193 le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
2194 le32_to_cpu(np->tx_ring.ex[i+3].buflow),
2195 le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
2200 spin_lock_irq(&np->lock);
2202 /* 1) stop tx engine */
2203 nv_stop_tx(dev);
2205 /* 2) check that the packets were not sent already: */
2206 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2207 nv_tx_done(dev);
2208 else
2209 nv_tx_done_optimized(dev, np->tx_ring_size);
2211 /* 3) if there are dead entries: clear everything */
2212 if (np->get_tx_ctx != np->put_tx_ctx) {
2213 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
2214 nv_drain_tx(dev);
2215 nv_init_tx(dev);
2216 setup_hw_rings(dev, NV_SETUP_TX_RING);
2219 netif_wake_queue(dev);
2221 /* 4) restart tx engine */
2222 nv_start_tx(dev);
2223 spin_unlock_irq(&np->lock);
/*
 * Called when the nic notices a mismatch between the actual data len on the
 * wire and the len indicated in the 802 header
 */
2230 static int nv_getlen(struct net_device *dev, void *packet, int datalen)
2232 int hdrlen; /* length of the 802 header */
2233 int protolen; /* length as stored in the proto field */
2235 /* 1) calculate len according to header */
2236 if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
2237 protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
2238 hdrlen = VLAN_HLEN;
2239 } else {
2240 protolen = ntohs( ((struct ethhdr *)packet)->h_proto);
2241 hdrlen = ETH_HLEN;
2243 dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
2244 dev->name, datalen, protolen, hdrlen);
2245 if (protolen > ETH_DATA_LEN)
2246 return datalen; /* Value in proto field not a len, no checks possible */
2248 protolen += hdrlen;
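/* example: a raw 802.3 frame whose length field says 46 but that
 * arrives as 64 bytes on the wire has protolen = 46 + 14 = 60 here,
 * so the checks below trim it back to 60 bytes */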
2249 /* consistency checks: */
2250 if (datalen > ETH_ZLEN) {
2251 if (datalen >= protolen) {
/* more data on wire than in 802 header, trim off
 * the additional data.
 */
2255 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
2256 dev->name, protolen);
2257 return protolen;
2258 } else {
/* less data on wire than mentioned in header.
 * Discard the packet.
 */
2262 dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
2263 dev->name);
2264 return -1;
2266 } else {
2267 /* short packet. Accept only if 802 values are also short */
2268 if (protolen > ETH_ZLEN) {
2269 dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
2270 dev->name);
2271 return -1;
2273 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
2274 dev->name, datalen);
2275 return datalen;
2279 static int nv_rx_process(struct net_device *dev, int limit)
2281 struct fe_priv *np = netdev_priv(dev);
2282 u32 flags;
2283 int rx_work = 0;
2284 struct sk_buff *skb;
2285 int len;
2287 while((np->get_rx.orig != np->put_rx.orig) &&
2288 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
2289 (rx_work < limit)) {
2291 dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
2292 dev->name, flags);
/*
 * the packet is for us - immediately tear down the pci mapping.
 * TODO: check if a prefetch of the first cacheline improves
 * the performance.
 */
2299 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2300 np->get_rx_ctx->dma_len,
2301 PCI_DMA_FROMDEVICE);
2302 skb = np->get_rx_ctx->skb;
2303 np->get_rx_ctx->skb = NULL;
2306 int j;
2307 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
2308 for (j=0; j<64; j++) {
2309 if ((j%16) == 0)
2310 dprintk("\n%03x:", j);
2311 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2313 dprintk("\n");
2315 /* look at what we actually got: */
2316 if (np->desc_ver == DESC_VER_1) {
2317 if (likely(flags & NV_RX_DESCRIPTORVALID)) {
2318 len = flags & LEN_MASK_V1;
2319 if (unlikely(flags & NV_RX_ERROR)) {
2320 if (flags & NV_RX_ERROR4) {
2321 len = nv_getlen(dev, skb->data, len);
2322 if (len < 0) {
2323 dev->stats.rx_errors++;
2324 dev_kfree_skb(skb);
2325 goto next_pkt;
2328 /* framing errors are soft errors */
2329 else if (flags & NV_RX_FRAMINGERR) {
2330 if (flags & NV_RX_SUBSTRACT1) {
2331 len--;
2334 /* the rest are hard errors */
2335 else {
2336 if (flags & NV_RX_MISSEDFRAME)
2337 dev->stats.rx_missed_errors++;
2338 if (flags & NV_RX_CRCERR)
2339 dev->stats.rx_crc_errors++;
2340 if (flags & NV_RX_OVERFLOW)
2341 dev->stats.rx_over_errors++;
2342 dev->stats.rx_errors++;
2343 dev_kfree_skb(skb);
2344 goto next_pkt;
2347 } else {
2348 dev_kfree_skb(skb);
2349 goto next_pkt;
2351 } else {
2352 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2353 len = flags & LEN_MASK_V2;
2354 if (unlikely(flags & NV_RX2_ERROR)) {
2355 if (flags & NV_RX2_ERROR4) {
2356 len = nv_getlen(dev, skb->data, len);
2357 if (len < 0) {
2358 dev->stats.rx_errors++;
2359 dev_kfree_skb(skb);
2360 goto next_pkt;
2363 /* framing errors are soft errors */
2364 else if (flags & NV_RX2_FRAMINGERR) {
2365 if (flags & NV_RX2_SUBSTRACT1) {
2366 len--;
2369 /* the rest are hard errors */
2370 else {
2371 if (flags & NV_RX2_CRCERR)
2372 dev->stats.rx_crc_errors++;
2373 if (flags & NV_RX2_OVERFLOW)
2374 dev->stats.rx_over_errors++;
2375 dev->stats.rx_errors++;
2376 dev_kfree_skb(skb);
2377 goto next_pkt;
2380 if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
2381 ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */
2382 skb->ip_summed = CHECKSUM_UNNECESSARY;
2383 } else {
2384 dev_kfree_skb(skb);
2385 goto next_pkt;
2388 /* got a valid packet - forward it to the network core */
2389 skb_put(skb, len);
2390 skb->protocol = eth_type_trans(skb, dev);
2391 dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
2392 dev->name, len, skb->protocol);
2393 #ifdef CONFIG_FORCEDETH_NAPI
2394 netif_receive_skb(skb);
2395 #else
2396 netif_rx(skb);
2397 #endif
2398 dev->last_rx = jiffies;
2399 dev->stats.rx_packets++;
2400 dev->stats.rx_bytes += len;
2401 next_pkt:
2402 if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
2403 np->get_rx.orig = np->first_rx.orig;
2404 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2405 np->get_rx_ctx = np->first_rx_ctx;
2407 rx_work++;
2410 return rx_work;
2413 static int nv_rx_process_optimized(struct net_device *dev, int limit)
2415 struct fe_priv *np = netdev_priv(dev);
2416 u32 flags;
2417 u32 vlanflags = 0;
2418 int rx_work = 0;
2419 struct sk_buff *skb;
2420 int len;
2422 while((np->get_rx.ex != np->put_rx.ex) &&
2423 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
2424 (rx_work < limit)) {
2426 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
2427 dev->name, flags);
/*
 * the packet is for us - immediately tear down the pci mapping.
 * TODO: check if a prefetch of the first cacheline improves
 * the performance.
 */
2434 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2435 np->get_rx_ctx->dma_len,
2436 PCI_DMA_FROMDEVICE);
2437 skb = np->get_rx_ctx->skb;
2438 np->get_rx_ctx->skb = NULL;
2441 int j;
2442 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
2443 for (j=0; j<64; j++) {
2444 if ((j%16) == 0)
2445 dprintk("\n%03x:", j);
2446 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2448 dprintk("\n");
2450 /* look at what we actually got: */
2451 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2452 len = flags & LEN_MASK_V2;
2453 if (unlikely(flags & NV_RX2_ERROR)) {
2454 if (flags & NV_RX2_ERROR4) {
2455 len = nv_getlen(dev, skb->data, len);
2456 if (len < 0) {
2457 dev_kfree_skb(skb);
2458 goto next_pkt;
2461 /* framing errors are soft errors */
2462 else if (flags & NV_RX2_FRAMINGERR) {
2463 if (flags & NV_RX2_SUBSTRACT1) {
2464 len--;
2467 /* the rest are hard errors */
2468 else {
2469 dev_kfree_skb(skb);
2470 goto next_pkt;
2474 if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
2475 ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */
2476 skb->ip_summed = CHECKSUM_UNNECESSARY;
2478 /* got a valid packet - forward it to the network core */
2479 skb_put(skb, len);
2480 skb->protocol = eth_type_trans(skb, dev);
2481 prefetch(skb->data);
2483 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n",
2484 dev->name, len, skb->protocol);
2486 if (likely(!np->vlangrp)) {
2487 #ifdef CONFIG_FORCEDETH_NAPI
2488 netif_receive_skb(skb);
2489 #else
2490 netif_rx(skb);
2491 #endif
2492 } else {
2493 vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
2494 if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
2495 #ifdef CONFIG_FORCEDETH_NAPI
2496 vlan_hwaccel_receive_skb(skb, np->vlangrp,
2497 vlanflags & NV_RX3_VLAN_TAG_MASK);
2498 #else
2499 vlan_hwaccel_rx(skb, np->vlangrp,
2500 vlanflags & NV_RX3_VLAN_TAG_MASK);
2501 #endif
2502 } else {
2503 #ifdef CONFIG_FORCEDETH_NAPI
2504 netif_receive_skb(skb);
2505 #else
2506 netif_rx(skb);
2507 #endif
2511 dev->last_rx = jiffies;
2512 dev->stats.rx_packets++;
2513 dev->stats.rx_bytes += len;
2514 } else {
2515 dev_kfree_skb(skb);
2517 next_pkt:
2518 if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
2519 np->get_rx.ex = np->first_rx.ex;
2520 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2521 np->get_rx_ctx = np->first_rx_ctx;
2523 rx_work++;
2526 return rx_work;
2529 static void set_bufsize(struct net_device *dev)
2531 struct fe_priv *np = netdev_priv(dev);
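/* example: assuming NV_RX_HEADERS is 64, the default MTU of 1500 is
 * at most ETH_DATA_LEN, so rx_buf_sz becomes 1500 + 64 = 1564 bytes */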
2533 if (dev->mtu <= ETH_DATA_LEN)
2534 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
2535 else
2536 np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
/*
 * nv_change_mtu: dev->change_mtu function
 * Called with dev_base_lock held for read.
 */
2543 static int nv_change_mtu(struct net_device *dev, int new_mtu)
2545 struct fe_priv *np = netdev_priv(dev);
2546 int old_mtu;
2548 if (new_mtu < 64 || new_mtu > np->pkt_limit)
2549 return -EINVAL;
2551 old_mtu = dev->mtu;
2552 dev->mtu = new_mtu;
2554 /* return early if the buffer sizes will not change */
2555 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
2556 return 0;
2557 if (old_mtu == new_mtu)
2558 return 0;
2560 /* synchronized against open : rtnl_lock() held by caller */
2561 if (netif_running(dev)) {
2562 u8 __iomem *base = get_hwbase(dev);
/*
 * It seems that the nic preloads valid ring entries into an
 * internal buffer. The procedure for flushing everything is
 * guessed; there is probably a simpler approach.
 * Changing the MTU is a rare event, so it shouldn't matter.
 */
2569 nv_disable_irq(dev);
2570 netif_tx_lock_bh(dev);
2571 spin_lock(&np->lock);
2572 /* stop engines */
2573 nv_stop_rx(dev);
2574 nv_stop_tx(dev);
2575 nv_txrx_reset(dev);
2576 /* drain rx queue */
2577 nv_drain_rx(dev);
2578 nv_drain_tx(dev);
2579 /* reinit driver view of the rx queue */
2580 set_bufsize(dev);
2581 if (nv_init_ring(dev)) {
2582 if (!np->in_shutdown)
2583 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2585 /* reinit nic view of the rx queue */
2586 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
2587 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
2588 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
2589 base + NvRegRingSizes);
2590 pci_push(base);
2591 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2592 pci_push(base);
2594 /* restart rx engine */
2595 nv_start_rx(dev);
2596 nv_start_tx(dev);
2597 spin_unlock(&np->lock);
2598 netif_tx_unlock_bh(dev);
2599 nv_enable_irq(dev);
2601 return 0;
2604 static void nv_copy_mac_to_hw(struct net_device *dev)
2606 u8 __iomem *base = get_hwbase(dev);
2607 u32 mac[2];
2609 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
2610 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
2611 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
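/* example: for dev_addr 00:11:22:33:44:55 the little-endian packing
 * above yields mac[0] = 0x33221100 and mac[1] = 0x00005544 */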
2613 writel(mac[0], base + NvRegMacAddrA);
2614 writel(mac[1], base + NvRegMacAddrB);
/*
 * nv_set_mac_address: dev->set_mac_address function
 * Called with rtnl_lock() held.
 */
2621 static int nv_set_mac_address(struct net_device *dev, void *addr)
2623 struct fe_priv *np = netdev_priv(dev);
2624 struct sockaddr *macaddr = (struct sockaddr*)addr;
2626 if (!is_valid_ether_addr(macaddr->sa_data))
2627 return -EADDRNOTAVAIL;
2629 /* synchronized against open : rtnl_lock() held by caller */
2630 memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
2632 if (netif_running(dev)) {
2633 netif_tx_lock_bh(dev);
2634 spin_lock_irq(&np->lock);
2636 /* stop rx engine */
2637 nv_stop_rx(dev);
2639 /* set mac address */
2640 nv_copy_mac_to_hw(dev);
2642 /* restart rx engine */
2643 nv_start_rx(dev);
2644 spin_unlock_irq(&np->lock);
2645 netif_tx_unlock_bh(dev);
2646 } else {
2647 nv_copy_mac_to_hw(dev);
2649 return 0;
/*
 * nv_set_multicast: dev->set_multicast function
 * Called with netif_tx_lock held.
 */
2656 static void nv_set_multicast(struct net_device *dev)
2658 struct fe_priv *np = netdev_priv(dev);
2659 u8 __iomem *base = get_hwbase(dev);
2660 u32 addr[2];
2661 u32 mask[2];
2662 u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;
2664 memset(addr, 0, sizeof(addr));
2665 memset(mask, 0, sizeof(mask));
2667 if (dev->flags & IFF_PROMISC) {
2668 pff |= NVREG_PFF_PROMISC;
2669 } else {
2670 pff |= NVREG_PFF_MYADDR;
2672 if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
2673 u32 alwaysOff[2];
2674 u32 alwaysOn[2];
2676 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
2677 if (dev->flags & IFF_ALLMULTI) {
2678 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
2679 } else {
2680 struct dev_mc_list *walk;
2682 walk = dev->mc_list;
2683 while (walk != NULL) {
2684 u32 a, b;
2685 a = le32_to_cpu(*(__le32 *) walk->dmi_addr);
2686 b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4]));
2687 alwaysOn[0] &= a;
2688 alwaysOff[0] &= ~a;
2689 alwaysOn[1] &= b;
2690 alwaysOff[1] &= ~b;
2691 walk = walk->next;
2694 addr[0] = alwaysOn[0];
2695 addr[1] = alwaysOn[1];
2696 mask[0] = alwaysOn[0] | alwaysOff[0];
2697 mask[1] = alwaysOn[1] | alwaysOff[1];
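/* addr now holds the bits set in every list entry and mask flags each
 * bit position that is identical across the whole list; the nic will
 * accept any address that matches addr on the masked bits */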
2698 } else {
2699 mask[0] = NVREG_MCASTMASKA_NONE;
2700 mask[1] = NVREG_MCASTMASKB_NONE;
2703 addr[0] |= NVREG_MCASTADDRA_FORCE;
2704 pff |= NVREG_PFF_ALWAYS;
2705 spin_lock_irq(&np->lock);
2706 nv_stop_rx(dev);
2707 writel(addr[0], base + NvRegMulticastAddrA);
2708 writel(addr[1], base + NvRegMulticastAddrB);
2709 writel(mask[0], base + NvRegMulticastMaskA);
2710 writel(mask[1], base + NvRegMulticastMaskB);
2711 writel(pff, base + NvRegPacketFilterFlags);
2712 dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
2713 dev->name);
2714 nv_start_rx(dev);
2715 spin_unlock_irq(&np->lock);
2718 static void nv_update_pause(struct net_device *dev, u32 pause_flags)
2720 struct fe_priv *np = netdev_priv(dev);
2721 u8 __iomem *base = get_hwbase(dev);
2723 np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
2725 if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
2726 u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
2727 if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
2728 writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
2729 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
2730 } else {
2731 writel(pff, base + NvRegPacketFilterFlags);
2734 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
2735 u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
2736 if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
2737 writel(NVREG_TX_PAUSEFRAME_ENABLE, base + NvRegTxPauseFrame);
2738 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
2739 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
2740 } else {
2741 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
2742 writel(regmisc, base + NvRegMisc1);
/*
 * nv_update_linkspeed: Setup the MAC according to the link partner
 * @dev: Network device to be configured
 *
 * The function queries the PHY and checks if there is a link partner.
 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
 * set to 10 MBit HD.
 *
 * The function returns 0 if there is no link partner and 1 if there is
 * a good link partner.
 */
2758 static int nv_update_linkspeed(struct net_device *dev)
2760 struct fe_priv *np = netdev_priv(dev);
2761 u8 __iomem *base = get_hwbase(dev);
2762 int adv = 0;
2763 int lpa = 0;
2764 int adv_lpa, adv_pause, lpa_pause;
2765 int newls = np->linkspeed;
2766 int newdup = np->duplex;
2767 int mii_status;
2768 int retval = 0;
2769 u32 control_1000, status_1000, phyreg, pause_flags, txreg;
/* BMSR_LSTATUS is latched, read it twice:
 * we want the current value.
 */
2774 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
2775 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
2777 if (!(mii_status & BMSR_LSTATUS)) {
2778 dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
2779 dev->name);
2780 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2781 newdup = 0;
2782 retval = 0;
2783 goto set_speed;
2786 if (np->autoneg == 0) {
2787 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
2788 dev->name, np->fixed_mode);
2789 if (np->fixed_mode & LPA_100FULL) {
2790 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
2791 newdup = 1;
2792 } else if (np->fixed_mode & LPA_100HALF) {
2793 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
2794 newdup = 0;
2795 } else if (np->fixed_mode & LPA_10FULL) {
2796 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2797 newdup = 1;
2798 } else {
2799 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2800 newdup = 0;
2802 retval = 1;
2803 goto set_speed;
2805 /* check auto negotiation is complete */
2806 if (!(mii_status & BMSR_ANEGCOMPLETE)) {
2807 /* still in autonegotiation - configure nic for 10 MBit HD and wait. */
2808 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2809 newdup = 0;
2810 retval = 0;
2811 dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
2812 goto set_speed;
2815 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
2816 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
2817 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
2818 dev->name, adv, lpa);
2820 retval = 1;
2821 if (np->gigabit == PHY_GIGABIT) {
2822 control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
2823 status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
2825 if ((control_1000 & ADVERTISE_1000FULL) &&
2826 (status_1000 & LPA_1000FULL)) {
2827 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
2828 dev->name);
2829 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
2830 newdup = 1;
2831 goto set_speed;
2835 /* FIXME: handle parallel detection properly */
2836 adv_lpa = lpa & adv;
2837 if (adv_lpa & LPA_100FULL) {
2838 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
2839 newdup = 1;
2840 } else if (adv_lpa & LPA_100HALF) {
2841 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
2842 newdup = 0;
2843 } else if (adv_lpa & LPA_10FULL) {
2844 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2845 newdup = 1;
2846 } else if (adv_lpa & LPA_10HALF) {
2847 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2848 newdup = 0;
2849 } else {
2850 dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
2851 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2852 newdup = 0;
2855 set_speed:
2856 if (np->duplex == newdup && np->linkspeed == newls)
2857 return retval;
2859 dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
2860 dev->name, np->linkspeed, np->duplex, newls, newdup);
2862 np->duplex = newdup;
2863 np->linkspeed = newls;
2865 if (np->gigabit == PHY_GIGABIT) {
2866 phyreg = readl(base + NvRegRandomSeed);
2867 phyreg &= ~(0x3FF00);
2868 if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
2869 phyreg |= NVREG_RNDSEED_FORCE3;
2870 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
2871 phyreg |= NVREG_RNDSEED_FORCE2;
2872 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
2873 phyreg |= NVREG_RNDSEED_FORCE;
2874 writel(phyreg, base + NvRegRandomSeed);
2877 phyreg = readl(base + NvRegPhyInterface);
2878 phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
2879 if (np->duplex == 0)
2880 phyreg |= PHY_HALF;
2881 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
2882 phyreg |= PHY_100;
2883 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
2884 phyreg |= PHY_1000;
2885 writel(phyreg, base + NvRegPhyInterface);
2887 if (phyreg & PHY_RGMII) {
2888 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
2889 txreg = NVREG_TX_DEFERRAL_RGMII_1000;
2890 else
2891 txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
2892 } else {
2893 txreg = NVREG_TX_DEFERRAL_DEFAULT;
2895 writel(txreg, base + NvRegTxDeferral);
2897 if (np->desc_ver == DESC_VER_1) {
2898 txreg = NVREG_TX_WM_DESC1_DEFAULT;
2899 } else {
2900 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
2901 txreg = NVREG_TX_WM_DESC2_3_1000;
2902 else
2903 txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
2905 writel(txreg, base + NvRegTxWatermark);
2907 writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
2908 base + NvRegMisc1);
2909 pci_push(base);
2910 writel(np->linkspeed, base + NvRegLinkSpeed);
2911 pci_push(base);
2913 pause_flags = 0;
2914 /* setup pause frame */
2915 if (np->duplex != 0) {
2916 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
2917 adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM);
2918 lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);
2920 switch (adv_pause) {
2921 case ADVERTISE_PAUSE_CAP:
2922 if (lpa_pause & LPA_PAUSE_CAP) {
2923 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
2924 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
2925 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
2927 break;
2928 case ADVERTISE_PAUSE_ASYM:
2929 if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
2931 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
2933 break;
2934 case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM:
2935 if (lpa_pause & LPA_PAUSE_CAP)
2937 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
2938 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
2939 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
2941 if (lpa_pause == LPA_PAUSE_ASYM)
2943 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
2945 break;
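/* the switch above is roughly the IEEE 802.3x pause resolution:
 * symmetric pause on both ends enables rx (plus tx if requested),
 * an asym-only local advertisement yields tx pause against a
 * sym+asym partner, and a sym+asym local advertisement yields rx
 * pause against symmetric or asym-only partners */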
2947 } else {
2948 pause_flags = np->pause_flags;
2951 nv_update_pause(dev, pause_flags);
2953 return retval;
2956 static void nv_linkchange(struct net_device *dev)
2958 if (nv_update_linkspeed(dev)) {
2959 if (!netif_carrier_ok(dev)) {
2960 netif_carrier_on(dev);
2961 printk(KERN_INFO "%s: link up.\n", dev->name);
2962 nv_start_rx(dev);
2964 } else {
2965 if (netif_carrier_ok(dev)) {
2966 netif_carrier_off(dev);
2967 printk(KERN_INFO "%s: link down.\n", dev->name);
2968 nv_stop_rx(dev);
2973 static void nv_link_irq(struct net_device *dev)
2975 u8 __iomem *base = get_hwbase(dev);
2976 u32 miistat;
2978 miistat = readl(base + NvRegMIIStatus);
2979 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
2980 dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);
2982 if (miistat & (NVREG_MIISTAT_LINKCHANGE))
2983 nv_linkchange(dev);
2984 dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
2987 static irqreturn_t nv_nic_irq(int foo, void *data)
2989 struct net_device *dev = (struct net_device *) data;
2990 struct fe_priv *np = netdev_priv(dev);
2991 u8 __iomem *base = get_hwbase(dev);
2992 u32 events;
2993 int i;
2995 dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
2997 for (i=0; ; i++) {
2998 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
2999 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3000 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
3001 } else {
3002 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3003 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
3005 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3006 if (!(events & np->irqmask))
3007 break;
3009 spin_lock(&np->lock);
3010 nv_tx_done(dev);
3011 spin_unlock(&np->lock);
3013 #ifdef CONFIG_FORCEDETH_NAPI
3014 if (events & NVREG_IRQ_RX_ALL) {
3015 netif_rx_schedule(dev, &np->napi);
/* Disable further receive irqs */
3018 spin_lock(&np->lock);
3019 np->irqmask &= ~NVREG_IRQ_RX_ALL;
3021 if (np->msi_flags & NV_MSI_X_ENABLED)
3022 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3023 else
3024 writel(np->irqmask, base + NvRegIrqMask);
3025 spin_unlock(&np->lock);
3027 #else
3028 if (nv_rx_process(dev, RX_WORK_PER_LOOP)) {
3029 if (unlikely(nv_alloc_rx(dev))) {
3030 spin_lock(&np->lock);
3031 if (!np->in_shutdown)
3032 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3033 spin_unlock(&np->lock);
3036 #endif
3037 if (unlikely(events & NVREG_IRQ_LINK)) {
3038 spin_lock(&np->lock);
3039 nv_link_irq(dev);
3040 spin_unlock(&np->lock);
3042 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
3043 spin_lock(&np->lock);
3044 nv_linkchange(dev);
3045 spin_unlock(&np->lock);
3046 np->link_timeout = jiffies + LINK_TIMEOUT;
3048 if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
3049 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
3050 dev->name, events);
3052 if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
3053 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
3054 dev->name, events);
3056 if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
3057 spin_lock(&np->lock);
3058 /* disable interrupts on the nic */
3059 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3060 writel(0, base + NvRegIrqMask);
3061 else
3062 writel(np->irqmask, base + NvRegIrqMask);
3063 pci_push(base);
3065 if (!np->in_shutdown) {
3066 np->nic_poll_irq = np->irqmask;
3067 np->recover_error = 1;
3068 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3070 spin_unlock(&np->lock);
3071 break;
3073 if (unlikely(i > max_interrupt_work)) {
3074 spin_lock(&np->lock);
3075 /* disable interrupts on the nic */
3076 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3077 writel(0, base + NvRegIrqMask);
3078 else
3079 writel(np->irqmask, base + NvRegIrqMask);
3080 pci_push(base);
3082 if (!np->in_shutdown) {
3083 np->nic_poll_irq = np->irqmask;
3084 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3086 spin_unlock(&np->lock);
3087 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
3088 break;
3092 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
3094 return IRQ_RETVAL(i);
/*
 * All _optimized functions are used to help increase performance
 * (reduce CPU and increase throughput). They use descriptor version 3,
 * compiler directives, and reduce memory accesses.
 */
3102 static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3104 struct net_device *dev = (struct net_device *) data;
3105 struct fe_priv *np = netdev_priv(dev);
3106 u8 __iomem *base = get_hwbase(dev);
3107 u32 events;
3108 int i;
3110 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);
3112 for (i=0; ; i++) {
3113 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3114 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3115 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
3116 } else {
3117 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3118 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
3120 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3121 if (!(events & np->irqmask))
3122 break;
3124 spin_lock(&np->lock);
3125 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3126 spin_unlock(&np->lock);
3128 #ifdef CONFIG_FORCEDETH_NAPI
3129 if (events & NVREG_IRQ_RX_ALL) {
3130 netif_rx_schedule(dev, &np->napi);
/* Disable further receive irqs */
3133 spin_lock(&np->lock);
3134 np->irqmask &= ~NVREG_IRQ_RX_ALL;
3136 if (np->msi_flags & NV_MSI_X_ENABLED)
3137 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3138 else
3139 writel(np->irqmask, base + NvRegIrqMask);
3140 spin_unlock(&np->lock);
3142 #else
3143 if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
3144 if (unlikely(nv_alloc_rx_optimized(dev))) {
3145 spin_lock(&np->lock);
3146 if (!np->in_shutdown)
3147 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3148 spin_unlock(&np->lock);
3151 #endif
3152 if (unlikely(events & NVREG_IRQ_LINK)) {
3153 spin_lock(&np->lock);
3154 nv_link_irq(dev);
3155 spin_unlock(&np->lock);
3157 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
3158 spin_lock(&np->lock);
3159 nv_linkchange(dev);
3160 spin_unlock(&np->lock);
3161 np->link_timeout = jiffies + LINK_TIMEOUT;
3163 if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
3164 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
3165 dev->name, events);
3167 if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
3168 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
3169 dev->name, events);
3171 if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
3172 spin_lock(&np->lock);
3173 /* disable interrupts on the nic */
3174 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3175 writel(0, base + NvRegIrqMask);
3176 else
3177 writel(np->irqmask, base + NvRegIrqMask);
3178 pci_push(base);
3180 if (!np->in_shutdown) {
3181 np->nic_poll_irq = np->irqmask;
3182 np->recover_error = 1;
3183 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3185 spin_unlock(&np->lock);
3186 break;
3189 if (unlikely(i > max_interrupt_work)) {
3190 spin_lock(&np->lock);
3191 /* disable interrupts on the nic */
3192 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3193 writel(0, base + NvRegIrqMask);
3194 else
3195 writel(np->irqmask, base + NvRegIrqMask);
3196 pci_push(base);
3198 if (!np->in_shutdown) {
3199 np->nic_poll_irq = np->irqmask;
3200 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3202 spin_unlock(&np->lock);
3203 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
3204 break;
3208 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
3210 return IRQ_RETVAL(i);
3213 static irqreturn_t nv_nic_irq_tx(int foo, void *data)
3215 struct net_device *dev = (struct net_device *) data;
3216 struct fe_priv *np = netdev_priv(dev);
3217 u8 __iomem *base = get_hwbase(dev);
3218 u32 events;
3219 int i;
3220 unsigned long flags;
3222 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
3224 for (i=0; ; i++) {
3225 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
3226 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
3227 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
3228 if (!(events & np->irqmask))
3229 break;
3231 spin_lock_irqsave(&np->lock, flags);
3232 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3233 spin_unlock_irqrestore(&np->lock, flags);
3235 if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
3236 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
3237 dev->name, events);
3239 if (unlikely(i > max_interrupt_work)) {
3240 spin_lock_irqsave(&np->lock, flags);
3241 /* disable interrupts on the nic */
3242 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
3243 pci_push(base);
3245 if (!np->in_shutdown) {
3246 np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
3247 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3249 spin_unlock_irqrestore(&np->lock, flags);
3250 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
3251 break;
3255 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);
3257 return IRQ_RETVAL(i);
3260 #ifdef CONFIG_FORCEDETH_NAPI
3261 static int nv_napi_poll(struct napi_struct *napi, int budget)
3263 struct fe_priv *np = container_of(napi, struct fe_priv, napi);
3264 struct net_device *dev = np->dev;
3265 u8 __iomem *base = get_hwbase(dev);
3266 unsigned long flags;
3267 int pkts, retcode;
3269 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
3270 pkts = nv_rx_process(dev, budget);
3271 retcode = nv_alloc_rx(dev);
3272 } else {
3273 pkts = nv_rx_process_optimized(dev, budget);
3274 retcode = nv_alloc_rx_optimized(dev);
3277 if (retcode) {
3278 spin_lock_irqsave(&np->lock, flags);
3279 if (!np->in_shutdown)
3280 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3281 spin_unlock_irqrestore(&np->lock, flags);
3284 if (pkts < budget) {
3285 /* re-enable receive interrupts */
3286 spin_lock_irqsave(&np->lock, flags);
3288 __netif_rx_complete(dev, napi);
3290 np->irqmask |= NVREG_IRQ_RX_ALL;
3291 if (np->msi_flags & NV_MSI_X_ENABLED)
3292 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3293 else
3294 writel(np->irqmask, base + NvRegIrqMask);
3296 spin_unlock_irqrestore(&np->lock, flags);
3298 return pkts;
3300 #endif
3302 #ifdef CONFIG_FORCEDETH_NAPI
3303 static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3305 struct net_device *dev = (struct net_device *) data;
3306 struct fe_priv *np = netdev_priv(dev);
3307 u8 __iomem *base = get_hwbase(dev);
3308 u32 events;
3310 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3311 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
3313 if (events) {
3314 netif_rx_schedule(dev, &np->napi);
3315 /* disable receive interrupts on the nic */
3316 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3317 pci_push(base);
3319 return IRQ_HANDLED;
3321 #else
3322 static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3324 struct net_device *dev = (struct net_device *) data;
3325 struct fe_priv *np = netdev_priv(dev);
3326 u8 __iomem *base = get_hwbase(dev);
3327 u32 events;
3328 int i;
3329 unsigned long flags;
3331 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
3333 for (i=0; ; i++) {
3334 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3335 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
3336 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
3337 if (!(events & np->irqmask))
3338 break;
3340 if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
3341 if (unlikely(nv_alloc_rx_optimized(dev))) {
3342 spin_lock_irqsave(&np->lock, flags);
3343 if (!np->in_shutdown)
3344 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3345 spin_unlock_irqrestore(&np->lock, flags);
3349 if (unlikely(i > max_interrupt_work)) {
3350 spin_lock_irqsave(&np->lock, flags);
3351 /* disable interrupts on the nic */
3352 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3353 pci_push(base);
3355 if (!np->in_shutdown) {
3356 np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
3357 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3359 spin_unlock_irqrestore(&np->lock, flags);
3360 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
3361 break;
3364 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);
3366 return IRQ_RETVAL(i);
3368 #endif
3370 static irqreturn_t nv_nic_irq_other(int foo, void *data)
3372 struct net_device *dev = (struct net_device *) data;
3373 struct fe_priv *np = netdev_priv(dev);
3374 u8 __iomem *base = get_hwbase(dev);
3375 u32 events;
3376 int i;
3377 unsigned long flags;
3379 dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
3381 for (i=0; ; i++) {
3382 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
3383 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
3384 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3385 if (!(events & np->irqmask))
3386 break;
3388 /* check tx in case we reached max loop limit in tx isr */
3389 spin_lock_irqsave(&np->lock, flags);
3390 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3391 spin_unlock_irqrestore(&np->lock, flags);
3393 if (events & NVREG_IRQ_LINK) {
3394 spin_lock_irqsave(&np->lock, flags);
3395 nv_link_irq(dev);
3396 spin_unlock_irqrestore(&np->lock, flags);
3398 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
3399 spin_lock_irqsave(&np->lock, flags);
3400 nv_linkchange(dev);
3401 spin_unlock_irqrestore(&np->lock, flags);
3402 np->link_timeout = jiffies + LINK_TIMEOUT;
3404 if (events & NVREG_IRQ_RECOVER_ERROR) {
3405 spin_lock_irq(&np->lock);
3406 /* disable interrupts on the nic */
3407 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3408 pci_push(base);
3410 if (!np->in_shutdown) {
3411 np->nic_poll_irq |= NVREG_IRQ_OTHER;
3412 np->recover_error = 1;
3413 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3415 spin_unlock_irq(&np->lock);
3416 break;
3418 if (events & (NVREG_IRQ_UNKNOWN)) {
3419 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
3420 dev->name, events);
3422 if (unlikely(i > max_interrupt_work)) {
3423 spin_lock_irqsave(&np->lock, flags);
3424 /* disable interrupts on the nic */
3425 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3426 pci_push(base);
3428 if (!np->in_shutdown) {
3429 np->nic_poll_irq |= NVREG_IRQ_OTHER;
3430 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3432 spin_unlock_irqrestore(&np->lock, flags);
3433 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
3434 break;
3438 dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);
3440 return IRQ_RETVAL(i);
3443 static irqreturn_t nv_nic_irq_test(int foo, void *data)
3445 struct net_device *dev = (struct net_device *) data;
3446 struct fe_priv *np = netdev_priv(dev);
3447 u8 __iomem *base = get_hwbase(dev);
3448 u32 events;
3450 dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);
3452 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3453 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3454 writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
3455 } else {
3456 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3457 writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
3459 pci_push(base);
3460 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3461 if (!(events & NVREG_IRQ_TIMER))
3462 return IRQ_RETVAL(0);
3464 spin_lock(&np->lock);
3465 np->intr_test = 1;
3466 spin_unlock(&np->lock);
3468 dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);
3470 return IRQ_RETVAL(1);
3473 static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
3475 u8 __iomem *base = get_hwbase(dev);
3476 int i;
3477 u32 msixmap = 0;
/* Each interrupt bit can be mapped to a MSIX vector (4 bits).
 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
 * the remaining 8 interrupts.
 */
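/* example: mapping irqmask 0x03 onto vector 2 builds msixmap 0x22,
 * i.e. vector 2 placed in the 4-bit fields for interrupt bits 0 and 1 */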
3483 for (i = 0; i < 8; i++) {
3484 if ((irqmask >> i) & 0x1) {
3485 msixmap |= vector << (i << 2);
3488 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
3490 msixmap = 0;
3491 for (i = 0; i < 8; i++) {
3492 if ((irqmask >> (i + 8)) & 0x1) {
3493 msixmap |= vector << (i << 2);
3496 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
3499 static int nv_request_irq(struct net_device *dev, int intr_test)
3501 struct fe_priv *np = get_nvpriv(dev);
3502 u8 __iomem *base = get_hwbase(dev);
3503 int ret = 1;
3504 int i;
3505 irqreturn_t (*handler)(int foo, void *data);
3507 if (intr_test) {
3508 handler = nv_nic_irq_test;
3509 } else {
3510 if (np->desc_ver == DESC_VER_3)
3511 handler = nv_nic_irq_optimized;
3512 else
3513 handler = nv_nic_irq;
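/* interrupt setup prefers MSI-X (with per-event vectors in throughput
 * mode), falls back to MSI and finally to the legacy INTx line when
 * the nic or the system cannot enable the preferred mode */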
3516 if (np->msi_flags & NV_MSI_X_CAPABLE) {
3517 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
3518 np->msi_x_entry[i].entry = i;
3520 if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
3521 np->msi_flags |= NV_MSI_X_ENABLED;
3522 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
3523 /* Request irq for rx handling */
3524 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQF_SHARED, dev->name, dev) != 0) {
3525 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
3526 pci_disable_msix(np->pci_dev);
3527 np->msi_flags &= ~NV_MSI_X_ENABLED;
3528 goto out_err;
3530 /* Request irq for tx handling */
3531 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQF_SHARED, dev->name, dev) != 0) {
3532 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
3533 pci_disable_msix(np->pci_dev);
3534 np->msi_flags &= ~NV_MSI_X_ENABLED;
3535 goto out_free_rx;
3537 /* Request irq for link and timer handling */
3538 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQF_SHARED, dev->name, dev) != 0) {
3539 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
3540 pci_disable_msix(np->pci_dev);
3541 np->msi_flags &= ~NV_MSI_X_ENABLED;
3542 goto out_free_tx;
3544 /* map interrupts to their respective vector */
3545 writel(0, base + NvRegMSIXMap0);
3546 writel(0, base + NvRegMSIXMap1);
3547 set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
3548 set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
3549 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
3550 } else {
3551 /* Request irq for all interrupts */
3552 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
3553 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
3554 pci_disable_msix(np->pci_dev);
3555 np->msi_flags &= ~NV_MSI_X_ENABLED;
3556 goto out_err;
3559 /* map interrupts to vector 0 */
3560 writel(0, base + NvRegMSIXMap0);
3561 writel(0, base + NvRegMSIXMap1);
3565 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
3566 if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
3567 np->msi_flags |= NV_MSI_ENABLED;
3568 dev->irq = np->pci_dev->irq;
3569 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
3570 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
3571 pci_disable_msi(np->pci_dev);
3572 np->msi_flags &= ~NV_MSI_ENABLED;
3573 dev->irq = np->pci_dev->irq;
3574 goto out_err;
3577 /* map interrupts to vector 0 */
3578 writel(0, base + NvRegMSIMap0);
3579 writel(0, base + NvRegMSIMap1);
3580 /* enable msi vector 0 */
3581 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
3584 if (ret != 0) {
3585 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
3586 goto out_err;
3590 return 0;
3591 out_free_tx:
3592 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
3593 out_free_rx:
3594 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
3595 out_err:
3596 return 1;
static void nv_free_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	int i;

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
			free_irq(np->msi_x_entry[i].vector, dev);
		}
		pci_disable_msix(np->pci_dev);
		np->msi_flags &= ~NV_MSI_X_ENABLED;
	} else {
		free_irq(np->pci_dev->irq, dev);
		if (np->msi_flags & NV_MSI_ENABLED) {
			pci_disable_msi(np->pci_dev);
			np->msi_flags &= ~NV_MSI_ENABLED;
		}
	}
}
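
/*
 * nv_do_nic_poll() runs from the nic_poll timer when the interrupt handler
 * has asked for events to be handled with the irq line masked, and after a
 * recoverable MAC error. It disables the relevant irq(s), optionally
 * re-initializes the rings, re-runs the handler by hand and unmasks again.
 */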
static void nv_do_nic_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 mask = 0;

	/*
	 * First disable irq(s) and then
	 * reenable interrupts on the nic, we have to do this before calling
	 * nv_nic_irq because that may decide to do otherwise
	 */

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq_lockdep(np->pci_dev->irq);
		mask = np->irqmask;
	} else {
		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
			mask |= NVREG_IRQ_RX_ALL;
		}
		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
			mask |= NVREG_IRQ_TX_ALL;
		}
		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
			mask |= NVREG_IRQ_OTHER;
		}
	}
	np->nic_poll_irq = 0;

	/* disable_irq() contains synchronize_irq, thus no irq handler can run now */

	if (np->recover_error) {
		np->recover_error = 0;
		printk(KERN_INFO "forcedeth: MAC in recoverable error state\n");
		if (netif_running(dev)) {
			netif_tx_lock_bh(dev);
			spin_lock(&np->lock);
			/* stop engines */
			nv_stop_rx(dev);
			nv_stop_tx(dev);
			nv_txrx_reset(dev);
			/* drain rx queue */
			nv_drain_rx(dev);
			nv_drain_tx(dev);
			/* reinit driver view of the rx queue */
			set_bufsize(dev);
			if (nv_init_ring(dev)) {
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			}
			/* reinit nic view of the rx queue */
			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
			       base + NvRegRingSizes);
			pci_push(base);
			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
			pci_push(base);

			/* restart rx engine */
			nv_start_rx(dev);
			nv_start_tx(dev);
			spin_unlock(&np->lock);
			netif_tx_unlock_bh(dev);
		}
	}

	writel(mask, base + NvRegIrqMask);
	pci_push(base);

	if (!using_multi_irqs(dev)) {
		if (np->desc_ver == DESC_VER_3)
			nv_nic_irq_optimized(0, dev);
		else
			nv_nic_irq(0, dev);
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq_lockdep(np->pci_dev->irq);
	} else {
		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
			nv_nic_irq_rx(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		}
		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
			nv_nic_irq_tx(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		}
		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
			nv_nic_irq_other(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
		}
	}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void nv_poll_controller(struct net_device *dev)
{
	nv_do_nic_poll((unsigned long) dev);
}
#endif

static void nv_do_stats_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	nv_get_hw_stats(dev);

	if (!np->in_shutdown)
		mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
}
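
/*
 * ethtool support starts here. nv_get_drvinfo() backs `ethtool -i`; the
 * remaining handlers are wired up through the ethtool_ops table below.
 */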
static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct fe_priv *np = netdev_priv(dev);
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, FORCEDETH_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}

static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);
	wolinfo->supported = WAKE_MAGIC;

	spin_lock_irq(&np->lock);
	if (np->wolenabled)
		wolinfo->wolopts = WAKE_MAGIC;
	spin_unlock_irq(&np->lock);
}

static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 flags = 0;

	if (wolinfo->wolopts == 0) {
		np->wolenabled = 0;
	} else if (wolinfo->wolopts & WAKE_MAGIC) {
		np->wolenabled = 1;
		flags = NVREG_WAKEUPFLAGS_ENABLE;
	}
	if (netif_running(dev)) {
		spin_lock_irq(&np->lock);
		writel(flags, base + NvRegWakeUpFlags);
		spin_unlock_irq(&np->lock);
	}
	return 0;
}
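
/*
 * Link settings are read back from the PHY via mii_rw() rather than from
 * cached driver state, so `ethtool <if>` reports what the hardware actually
 * negotiated; a forced link check covers the interface-down case.
 */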
static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);
	int adv;

	spin_lock_irq(&np->lock);
	ecmd->port = PORT_MII;
	if (!netif_running(dev)) {
		/* We do not track link speed / duplex setting if the
		 * interface is disabled. Force a link check */
		if (nv_update_linkspeed(dev)) {
			if (!netif_carrier_ok(dev))
				netif_carrier_on(dev);
		} else {
			if (netif_carrier_ok(dev))
				netif_carrier_off(dev);
		}
	}

	if (netif_carrier_ok(dev)) {
		switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
		case NVREG_LINKSPEED_10:
			ecmd->speed = SPEED_10;
			break;
		case NVREG_LINKSPEED_100:
			ecmd->speed = SPEED_100;
			break;
		case NVREG_LINKSPEED_1000:
			ecmd->speed = SPEED_1000;
			break;
		}
		ecmd->duplex = DUPLEX_HALF;
		if (np->duplex)
			ecmd->duplex = DUPLEX_FULL;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	ecmd->autoneg = np->autoneg;

	ecmd->advertising = ADVERTISED_MII;
	if (np->autoneg) {
		ecmd->advertising |= ADVERTISED_Autoneg;
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		if (adv & ADVERTISE_10HALF)
			ecmd->advertising |= ADVERTISED_10baseT_Half;
		if (adv & ADVERTISE_10FULL)
			ecmd->advertising |= ADVERTISED_10baseT_Full;
		if (adv & ADVERTISE_100HALF)
			ecmd->advertising |= ADVERTISED_100baseT_Half;
		if (adv & ADVERTISE_100FULL)
			ecmd->advertising |= ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			if (adv & ADVERTISE_1000FULL)
				ecmd->advertising |= ADVERTISED_1000baseT_Full;
		}
	}
	ecmd->supported = (SUPPORTED_Autoneg |
		SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		SUPPORTED_MII);
	if (np->gigabit == PHY_GIGABIT)
		ecmd->supported |= SUPPORTED_1000baseT_Full;

	ecmd->phy_address = np->phyaddr;
	ecmd->transceiver = XCVR_EXTERNAL;

	/* ignore maxtxpkt, maxrxpkt for now */
	spin_unlock_irq(&np->lock);
	return 0;
}
static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);

	if (ecmd->port != PORT_MII)
		return -EINVAL;
	if (ecmd->transceiver != XCVR_EXTERNAL)
		return -EINVAL;
	if (ecmd->phy_address != np->phyaddr) {
		/* TODO: support switching between multiple phys. Should be
		 * trivial, but not enabled due to lack of test hardware. */
		return -EINVAL;
	}
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		u32 mask;

		mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT)
			mask |= ADVERTISED_1000baseT_Full;

		if ((ecmd->advertising & mask) == 0)
			return -EINVAL;

	} else if (ecmd->autoneg == AUTONEG_DISABLE) {
		/* Note: autonegotiation disable, speed 1000 intentionally
		 * forbidden - no one should need that. */

		if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
			return -EINVAL;
		if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	netif_carrier_off(dev);
	if (netif_running(dev)) {
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rx(dev);
		nv_stop_tx(dev);
		spin_unlock(&np->lock);
		netif_tx_unlock_bh(dev);
	}

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		int adv, bmcr;

		np->autoneg = 1;

		/* advertise only what has been requested */
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (ecmd->advertising & ADVERTISED_10baseT_Half)
			adv |= ADVERTISE_10HALF;
		if (ecmd->advertising & ADVERTISED_10baseT_Full)
			adv |= ADVERTISE_10FULL;
		if (ecmd->advertising & ADVERTISED_100baseT_Half)
			adv |= ADVERTISE_100HALF;
		if (ecmd->advertising & ADVERTISED_100baseT_Full)
			adv |= ADVERTISE_100FULL;
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
			adv |= ADVERTISE_PAUSE_ASYM;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			if (ecmd->advertising & ADVERTISED_1000baseT_Full)
				adv |= ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
		}

		if (netif_running(dev))
			printk(KERN_INFO "%s: link down.\n", dev->name);
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
			bmcr |= BMCR_ANENABLE;
			/* reset the phy in order for settings to stick,
			 * and cause autoneg to start */
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
		}
	} else {
		int adv, bmcr;

		np->autoneg = 0;

		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_10HALF;
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_10FULL;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_100HALF;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_100FULL;
		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) { /* for rx we set both advertisements but disable tx pause */
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		}
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
			adv |= ADVERTISE_PAUSE_ASYM;
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
		}
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
		np->fixed_mode = adv;

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
		if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
			bmcr |= BMCR_FULLDPLX;
		if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
			bmcr |= BMCR_SPEED100;
		if (np->phy_oui == PHY_OUI_MARVELL) {
			/* reset the phy in order for forced mode settings to stick */
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
			if (netif_running(dev)) {
				/* Wait a bit and then reconfigure the nic. */
				udelay(10);
				nv_linkchange(dev);
			}
		}
	}

	if (netif_running(dev)) {
		nv_start_rx(dev);
		nv_start_tx(dev);
		nv_enable_irq(dev);
	}

	return 0;
}
#define FORCEDETH_REGS_VER	1

static int nv_get_regs_len(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	return np->register_size;
}

static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 *rbuf = buf;
	int i;

	regs->version = FORCEDETH_REGS_VER;
	spin_lock_irq(&np->lock);
	/* the buffer holds register_size bytes, i.e. register_size/4 words;
	 * iterate with '<', not '<=', or we write one u32 past the end */
	for (i = 0; i < np->register_size/sizeof(u32); i++)
		rbuf[i] = readl(base + i*sizeof(u32));
	spin_unlock_irq(&np->lock);
}
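
/*
 * nv_nway_reset() backs `ethtool -r`: it bounces the link and restarts
 * autonegotiation (via a full phy reset on Marvell E3016 phys, where a
 * plain BMCR_ANRESTART is not sufficient).
 */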
static int nv_nway_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int ret;

	if (np->autoneg) {
		int bmcr;

		netif_carrier_off(dev);
		if (netif_running(dev)) {
			nv_disable_irq(dev);
			netif_tx_lock_bh(dev);
			spin_lock(&np->lock);
			/* stop engines */
			nv_stop_rx(dev);
			nv_stop_tx(dev);
			spin_unlock(&np->lock);
			netif_tx_unlock_bh(dev);
			printk(KERN_INFO "%s: link down.\n", dev->name);
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
			bmcr |= BMCR_ANENABLE;
			/* reset the phy in order for settings to stick */
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
		}

		if (netif_running(dev)) {
			nv_start_rx(dev);
			nv_start_tx(dev);
			nv_enable_irq(dev);
		}
		ret = 0;
	} else {
		ret = -EINVAL;
	}

	return ret;
}

static int nv_set_tso(struct net_device *dev, u32 value)
{
	struct fe_priv *np = netdev_priv(dev);

	if ((np->driver_data & DEV_HAS_CHECKSUM))
		return ethtool_op_set_tso(dev, value);
	else
		return -EOPNOTSUPP;
}
static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
{
	struct fe_priv *np = netdev_priv(dev);

	ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;

	ring->rx_pending = np->rx_ring_size;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
	ring->tx_pending = np->tx_ring_size;
}
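
/*
 * Ring resizing (`ethtool -G`, e.g. something like
 * `ethtool -G eth0 rx 512 tx 512`): the new descriptor rings and skb maps
 * are allocated first, so that on allocation failure the device keeps
 * running on the old rings; only then are the engines stopped, the rings
 * swapped and restarted.
 */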
static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
	dma_addr_t ring_addr;

	if (ring->rx_pending < RX_RING_MIN ||
	    ring->tx_pending < TX_RING_MIN ||
	    ring->rx_mini_pending != 0 ||
	    ring->rx_jumbo_pending != 0 ||
	    (np->desc_ver == DESC_VER_1 &&
	     (ring->rx_pending > RING_MAX_DESC_VER_1 ||
	      ring->tx_pending > RING_MAX_DESC_VER_1)) ||
	    (np->desc_ver != DESC_VER_1 &&
	     (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
	      ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
		return -EINVAL;
	}

	/* allocate new rings */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		rxtx_ring = pci_alloc_consistent(np->pci_dev,
					sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
					&ring_addr);
	} else {
		rxtx_ring = pci_alloc_consistent(np->pci_dev,
					sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
					&ring_addr);
	}
	rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
	tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
	if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
		/* fall back to old rings */
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			if (rxtx_ring)
				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
						    rxtx_ring, ring_addr);
		} else {
			if (rxtx_ring)
				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
						    rxtx_ring, ring_addr);
		}
		kfree(rx_skbuff);
		kfree(tx_skbuff);
		goto exit;
	}

	if (netif_running(dev)) {
		nv_disable_irq(dev);
		/* note: np->lock and the tx lock stay held across the ring
		 * swap below; they are released after the engines restart */
		netif_tx_lock_bh(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rx(dev);
		nv_stop_tx(dev);
		nv_txrx_reset(dev);
		/* drain queues */
		nv_drain_rx(dev);
		nv_drain_tx(dev);
		/* delete queues */
		free_rings(dev);
	}

	/* set new values */
	np->rx_ring_size = ring->rx_pending;
	np->tx_ring_size = ring->tx_pending;
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->rx_ring.orig = (struct ring_desc *)rxtx_ring;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
	np->rx_skb = (struct nv_skb_map *)rx_skbuff;
	np->tx_skb = (struct nv_skb_map *)tx_skbuff;
	np->ring_addr = ring_addr;

	memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
	memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);

	if (netif_running(dev)) {
		/* reinit driver view of the queues */
		set_bufsize(dev);
		if (nv_init_ring(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}

		/* reinit nic view of the queues */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
		       base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);

		/* restart engines */
		nv_start_rx(dev);
		nv_start_tx(dev);
		spin_unlock(&np->lock);
		netif_tx_unlock_bh(dev);
		nv_enable_irq(dev);
	}
	return 0;
exit:
	return -ENOMEM;
}
static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
{
	struct fe_priv *np = netdev_priv(dev);

	pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
	pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
	pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
}
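
/*
 * Flow control configuration (`ethtool -A`, e.g. something like
 * `ethtool -A eth0 autoneg on rx on tx off`). With autoneg enabled the
 * requested rx/tx pause settings are folded into the advertised
 * PAUSE_CAP/PAUSE_ASYM bits and renegotiated; without it they are applied
 * to the MAC directly.
 */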
static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
{
	struct fe_priv *np = netdev_priv(dev);
	int adv, bmcr;

	if ((!np->autoneg && np->duplex == 0) ||
	    (np->autoneg && !pause->autoneg && np->duplex == 0)) {
		printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n",
		       dev->name);
		return -EINVAL;
	}
	if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
		printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name);
		return -EINVAL;
	}

	netif_carrier_off(dev);
	if (netif_running(dev)) {
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rx(dev);
		nv_stop_tx(dev);
		spin_unlock(&np->lock);
		netif_tx_unlock_bh(dev);
	}

	np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
	if (pause->rx_pause)
		np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
	if (pause->tx_pause)
		np->pause_flags |= NV_PAUSEFRAME_TX_REQ;

	if (np->autoneg && pause->autoneg) {
		np->pause_flags |= NV_PAUSEFRAME_AUTONEG;

		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
			adv |= ADVERTISE_PAUSE_ASYM;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

		if (netif_running(dev))
			printk(KERN_INFO "%s: link down.\n", dev->name);
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
	} else {
		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
		if (pause->rx_pause)
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		if (pause->tx_pause)
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;

		if (!netif_running(dev))
			nv_update_linkspeed(dev);
		else
			nv_update_pause(dev, np->pause_flags);
	}

	if (netif_running(dev)) {
		nv_start_rx(dev);
		nv_start_tx(dev);
		nv_enable_irq(dev);
	}
	return 0;
}
static u32 nv_get_rx_csum(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	return (np->rx_csum) != 0;
}

static int nv_set_rx_csum(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int retcode = 0;

	if (np->driver_data & DEV_HAS_CHECKSUM) {
		if (data) {
			np->rx_csum = 1;
			np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		} else {
			np->rx_csum = 0;
			/* vlan is dependent on rx checksum offload */
			if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
				np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
		}
		if (netif_running(dev)) {
			spin_lock_irq(&np->lock);
			writel(np->txrxctl_bits, base + NvRegTxRxControl);
			spin_unlock_irq(&np->lock);
		}
	} else {
		return -EINVAL;
	}

	return retcode;
}

static int nv_set_tx_csum(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_CHECKSUM)
		return ethtool_op_set_tx_hw_csum(dev, data);
	else
		return -EOPNOTSUPP;
}

static int nv_set_sg(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_CHECKSUM)
		return ethtool_op_set_sg(dev, data);
	else
		return -EOPNOTSUPP;
}
static int nv_get_sset_count(struct net_device *dev, int sset)
{
	struct fe_priv *np = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_TEST:
		if (np->driver_data & DEV_HAS_TEST_EXTENDED)
			return NV_TEST_COUNT_EXTENDED;
		else
			return NV_TEST_COUNT_BASE;
	case ETH_SS_STATS:
		if (np->driver_data & DEV_HAS_STATISTICS_V1)
			return NV_DEV_STATISTICS_V1_COUNT;
		else if (np->driver_data & DEV_HAS_STATISTICS_V2)
			return NV_DEV_STATISTICS_V2_COUNT;
		else
			return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
{
	struct fe_priv *np = netdev_priv(dev);

	/* update stats */
	nv_do_stats_poll((unsigned long)dev);

	memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
}
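
/*
 * Self-test helpers for `ethtool -t`. Each helper returns 1 on pass and 0
 * on a hard failure; nv_interrupt_test() additionally returns 2 when the
 * test interrupt was never seen. In nv_link_test() the BMSR is read twice
 * because the MII link-status bit is latched and the first read may still
 * report a stale link-down event.
 */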
static int nv_link_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int mii_status;

	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

	/* check phy link status */
	if (!(mii_status & BMSR_LSTATUS))
		return 0;
	else
		return 1;
}

static int nv_register_test(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	int i = 0;
	u32 orig_read, new_read;

	do {
		orig_read = readl(base + nv_registers_test[i].reg);

		/* xor with mask to toggle bits */
		orig_read ^= nv_registers_test[i].mask;

		writel(orig_read, base + nv_registers_test[i].reg);

		new_read = readl(base + nv_registers_test[i].reg);

		if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
			return 0;

		/* restore original value */
		orig_read ^= nv_registers_test[i].mask;
		writel(orig_read, base + nv_registers_test[i].reg);

	} while (nv_registers_test[++i].reg != 0);

	return 1;
}
static int nv_interrupt_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int testcnt;
	u32 save_msi_flags, save_poll_interval = 0;

	if (netif_running(dev)) {
		/* free current irq */
		nv_free_irq(dev);
		save_poll_interval = readl(base + NvRegPollingInterval);
	}

	/* flag to test interrupt handler */
	np->intr_test = 0;

	/* setup test irq */
	save_msi_flags = np->msi_flags;
	np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
	np->msi_flags |= 0x001; /* setup 1 vector */
	if (nv_request_irq(dev, 1))
		return 0;

	/* setup timer interrupt */
	writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);

	nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);

	/* wait for at least one interrupt */
	msleep(100);

	spin_lock_irq(&np->lock);

	/* flag should be set within ISR */
	testcnt = np->intr_test;
	if (!testcnt)
		ret = 2;

	nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
	if (!(np->msi_flags & NV_MSI_X_ENABLED))
		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	else
		writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	np->msi_flags = save_msi_flags;

	if (netif_running(dev)) {
		writel(save_poll_interval, base + NvRegPollingInterval);
		writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
		/* restore original irq */
		if (nv_request_irq(dev, 0))
			return 0;
	}

	return ret;
}
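
/*
 * The loopback test puts the MAC into internal loopback (NVREG_PFF_LOOPBACK),
 * transmits a single byte-pattern frame through tx descriptor 0 and then
 * checks that it comes back on the rx ring with the right length and
 * payload.
 */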
static int nv_loopback_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	struct sk_buff *tx_skb, *rx_skb;
	dma_addr_t test_dma_addr;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	u32 flags;
	int len, i, pkt_len;
	u8 *pkt_data;
	u32 filter_flags = 0;
	u32 misc1_flags = 0;
	int ret = 1;

	if (netif_running(dev)) {
		nv_disable_irq(dev);
		filter_flags = readl(base + NvRegPacketFilterFlags);
		misc1_flags = readl(base + NvRegMisc1);
	} else {
		nv_txrx_reset(dev);
	}

	/* reinit driver view of the rx queue */
	set_bufsize(dev);
	nv_init_ring(dev);

	/* setup hardware for loopback */
	writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
	writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);

	/* reinit nic view of the rx queue */
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
	       base + NvRegRingSizes);
	pci_push(base);

	/* restart rx engine */
	nv_start_rx(dev);
	nv_start_tx(dev);

	/* setup packet for tx */
	pkt_len = ETH_DATA_LEN;
	tx_skb = dev_alloc_skb(pkt_len);
	if (!tx_skb) {
		printk(KERN_ERR "dev_alloc_skb() failed during loopback test"
		       " of %s\n", dev->name);
		ret = 0;
		goto out;
	}
	/* the buffer is transmitted, so map it to-device; the unmap below
	 * must use the same direction and the matching unmap call */
	test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
				       skb_tailroom(tx_skb),
				       PCI_DMA_TODEVICE);
	pkt_data = skb_put(tx_skb, pkt_len);
	for (i = 0; i < pkt_len; i++)
		pkt_data[i] = (u8)(i & 0xff);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
		np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
	} else {
		np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr));
		np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr));
		np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
	}
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	pci_push(get_hwbase(dev));

	msleep(500);

	/* check for rx of the packet */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
		len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);

	} else {
		flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
		len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
	}

	if (flags & NV_RX_AVAIL) {
		ret = 0;
	} else if (np->desc_ver == DESC_VER_1) {
		if (flags & NV_RX_ERROR)
			ret = 0;
	} else {
		if (flags & NV_RX2_ERROR) {
			ret = 0;
		}
	}

	if (ret) {
		if (len != pkt_len) {
			ret = 0;
			dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
				dev->name, len, pkt_len);
		} else {
			rx_skb = np->rx_skb[0].skb;
			for (i = 0; i < pkt_len; i++) {
				if (rx_skb->data[i] != (u8)(i & 0xff)) {
					ret = 0;
					dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
						dev->name, i);
					break;
				}
			}
		}
	} else {
		dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
	}

	pci_unmap_single(np->pci_dev, test_dma_addr,
			 (skb_end_pointer(tx_skb) - tx_skb->data),
			 PCI_DMA_TODEVICE);
	dev_kfree_skb_any(tx_skb);
out:
	/* stop engines */
	nv_stop_rx(dev);
	nv_stop_tx(dev);
	nv_txrx_reset(dev);
	/* drain rx queue */
	nv_drain_rx(dev);
	nv_drain_tx(dev);

	if (netif_running(dev)) {
		writel(misc1_flags, base + NvRegMisc1);
		writel(filter_flags, base + NvRegPacketFilterFlags);
		nv_enable_irq(dev);
	}

	return ret;
}
static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int result;
	memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64));

	if (!nv_link_test(dev)) {
		test->flags |= ETH_TEST_FL_FAILED;
		buffer[0] = 1;
	}

	if (test->flags & ETH_TEST_FL_OFFLINE) {
		if (netif_running(dev)) {
			netif_stop_queue(dev);
#ifdef CONFIG_FORCEDETH_NAPI
			napi_disable(&np->napi);
#endif
			netif_tx_lock_bh(dev);
			spin_lock_irq(&np->lock);
			nv_disable_hw_interrupts(dev, np->irqmask);
			if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
			} else {
				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
			}
			/* stop engines */
			nv_stop_rx(dev);
			nv_stop_tx(dev);
			nv_txrx_reset(dev);
			/* drain rx queue */
			nv_drain_rx(dev);
			nv_drain_tx(dev);
			spin_unlock_irq(&np->lock);
			netif_tx_unlock_bh(dev);
		}

		if (!nv_register_test(dev)) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[1] = 1;
		}

		result = nv_interrupt_test(dev);
		if (result != 1) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[2] = 1;
		}
		if (result == 0) {
			/* bail out */
			return;
		}

		if (!nv_loopback_test(dev)) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[3] = 1;
		}

		if (netif_running(dev)) {
			/* reinit driver view of the rx queue */
			set_bufsize(dev);
			if (nv_init_ring(dev)) {
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			}
			/* reinit nic view of the rx queue */
			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
			       base + NvRegRingSizes);
			pci_push(base);
			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
			pci_push(base);
			/* restart rx engine */
			nv_start_rx(dev);
			nv_start_tx(dev);
			netif_start_queue(dev);
#ifdef CONFIG_FORCEDETH_NAPI
			napi_enable(&np->napi);
#endif
			nv_enable_hw_interrupts(dev, np->irqmask);
		}
	}
}
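
/*
 * `ethtool -t <if> offline` (e.g. `ethtool -t eth0 offline`) runs all four
 * tests above; without "offline" only the link test is performed. A 1 in a
 * buffer slot marks the corresponding test as failed.
 */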
static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str));
		break;
	case ETH_SS_TEST:
		memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str));
		break;
	}
}
static const struct ethtool_ops ops = {
	.get_drvinfo = nv_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_wol = nv_get_wol,
	.set_wol = nv_set_wol,
	.get_settings = nv_get_settings,
	.set_settings = nv_set_settings,
	.get_regs_len = nv_get_regs_len,
	.get_regs = nv_get_regs,
	.nway_reset = nv_nway_reset,
	.set_tso = nv_set_tso,
	.get_ringparam = nv_get_ringparam,
	.set_ringparam = nv_set_ringparam,
	.get_pauseparam = nv_get_pauseparam,
	.set_pauseparam = nv_set_pauseparam,
	.get_rx_csum = nv_get_rx_csum,
	.set_rx_csum = nv_set_rx_csum,
	.set_tx_csum = nv_set_tx_csum,
	.set_sg = nv_set_sg,
	.get_strings = nv_get_strings,
	.get_ethtool_stats = nv_get_ethtool_stats,
	.get_sset_count = nv_get_sset_count,
	.self_test = nv_self_test,
};
static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct fe_priv *np = get_nvpriv(dev);

	spin_lock_irq(&np->lock);

	/* save vlan group */
	np->vlangrp = grp;

	if (grp) {
		/* enable vlan on MAC */
		np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
	} else {
		/* disable vlan on MAC */
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
	}

	writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);

	spin_unlock_irq(&np->lock);
}
/* The mgmt unit and driver use a semaphore to access the phy during init */
static int nv_mgmt_acquire_sema(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	int i;
	u32 tx_ctrl, mgmt_sema;

	for (i = 0; i < 10; i++) {
		mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
		if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
			break;
		msleep(500);
	}

	if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
		return 0;

	for (i = 0; i < 2; i++) {
		tx_ctrl = readl(base + NvRegTransmitterControl);
		tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
		writel(tx_ctrl, base + NvRegTransmitterControl);

		/* verify that semaphore was acquired */
		tx_ctrl = readl(base + NvRegTransmitterControl);
		if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
		    ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE))
			return 1;
		else
			udelay(50);
	}

	return 0;
}
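
/*
 * nv_open() brings the nic up from scratch: wipe any previous
 * configuration, allocate and publish the descriptor rings, program the
 * MAC/poll/MII registers, request the irq(s) and only then enable
 * interrupts and kick off one manual link-speed update.
 */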
static int nv_open(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int oom, i;

	dprintk(KERN_DEBUG "nv_open: begin\n");

	/* erase previous misconfiguration */
	if (np->driver_data & DEV_HAS_POWER_CNTRL)
		nv_mac_reset(dev);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
	writel(0, base + NvRegPacketFilterFlags);

	writel(0, base + NvRegTransmitterControl);
	writel(0, base + NvRegReceiverControl);

	writel(0, base + NvRegAdapterControl);

	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
		writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);

	/* initialize descriptor rings */
	set_bufsize(dev);
	oom = nv_init_ring(dev);

	writel(0, base + NvRegLinkSpeed);
	writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
	nv_txrx_reset(dev);
	writel(0, base + NvRegUnknownSetupReg6);

	np->in_shutdown = 0;

	/* give hw rings */
	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
	       base + NvRegRingSizes);

	writel(np->linkspeed, base + NvRegLinkSpeed);
	if (np->desc_ver == DESC_VER_1)
		writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
	else
		writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
	writel(np->txrxctl_bits, base + NvRegTxRxControl);
	writel(np->vlanctl_bits, base + NvRegVlanControl);
	pci_push(base);
	writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
	reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
		  NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
		  KERN_INFO "open: SetupReg5, Bit 31 remained off\n");

	writel(0, base + NvRegMIIMask);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);

	writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
	writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
	writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);

	writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
	get_random_bytes(&i, sizeof(i));
	writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed);
	writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
	writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
	if (poll_interval == -1) {
		if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
			writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
		else
			writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	}
	else
		writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
	       base + NvRegAdapterControl);
	writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
	writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
	if (np->wolenabled)
		writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);

	i = readl(base + NvRegPowerState);
	if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
		writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);

	pci_push(base);
	udelay(10);
	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);

	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	pci_push(base);

	if (nv_request_irq(dev, 0)) {
		goto out_drain;
	}

	/* ask for interrupts */
	nv_enable_hw_interrupts(dev, np->irqmask);

	spin_lock_irq(&np->lock);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
	writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
	/* One manual link speed update: Interrupts are enabled, future link
	 * speed changes cause interrupts and are handled by nv_link_irq().
	 */
	{
		u32 miistat;
		miistat = readl(base + NvRegMIIStatus);
		writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
		dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
	}
	/* set linkspeed to invalid value, thus force nv_update_linkspeed
	 * to init hw */
	np->linkspeed = 0;
	ret = nv_update_linkspeed(dev);
	nv_start_rx(dev);
	nv_start_tx(dev);
	netif_start_queue(dev);
#ifdef CONFIG_FORCEDETH_NAPI
	napi_enable(&np->napi);
#endif

	if (ret) {
		netif_carrier_on(dev);
	} else {
		printk(KERN_INFO "%s: no link during initialization.\n", dev->name);
		netif_carrier_off(dev);
	}
	if (oom)
		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);

	/* start statistics timer */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2))
		mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);

	spin_unlock_irq(&np->lock);

	return 0;
out_drain:
	drain_ring(dev);
	return ret;
}
static int nv_close(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base;

	spin_lock_irq(&np->lock);
	np->in_shutdown = 1;
	spin_unlock_irq(&np->lock);
#ifdef CONFIG_FORCEDETH_NAPI
	napi_disable(&np->napi);
#endif
	synchronize_irq(np->pci_dev->irq);

	del_timer_sync(&np->oom_kick);
	del_timer_sync(&np->nic_poll);
	del_timer_sync(&np->stats_poll);

	netif_stop_queue(dev);
	spin_lock_irq(&np->lock);
	nv_stop_tx(dev);
	nv_stop_rx(dev);
	nv_txrx_reset(dev);

	/* disable interrupts on the nic or we will lock up */
	base = get_hwbase(dev);
	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	drain_ring(dev);

	if (np->wolenabled) {
		writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
		nv_start_rx(dev);
	}

	/* FIXME: power down nic */

	return 0;
}
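
/*
 * PCI probe: locate a BAR large enough for the register window, pick the
 * descriptor format from the per-chip feature flags, fix up the (possibly
 * byte-reversed) MAC address, power up phy and nic, scan the MII bus for
 * the phy and finally register the netdev.
 */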
static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
	struct net_device *dev;
	struct fe_priv *np;
	unsigned long addr;
	u8 __iomem *base;
	int err, i;
	u32 powerstate, txreg;
	u32 phystate_orig = 0, phystate;
	int phyinitialized = 0;
	DECLARE_MAC_BUF(mac);
	static int printed_version;

	if (!printed_version++)
		printk(KERN_INFO "%s: Reverse Engineered nForce ethernet"
		       " driver. Version %s.\n", DRV_NAME, FORCEDETH_VERSION);

	dev = alloc_etherdev(sizeof(struct fe_priv));
	err = -ENOMEM;
	if (!dev)
		goto out;

	np = netdev_priv(dev);
	np->dev = dev;
	np->pci_dev = pci_dev;
	spin_lock_init(&np->lock);
	SET_NETDEV_DEV(dev, &pci_dev->dev);

	init_timer(&np->oom_kick);
	np->oom_kick.data = (unsigned long) dev;
	np->oom_kick.function = &nv_do_rx_refill;	/* timer handler */
	init_timer(&np->nic_poll);
	np->nic_poll.data = (unsigned long) dev;
	np->nic_poll.function = &nv_do_nic_poll;	/* timer handler */
	init_timer(&np->stats_poll);
	np->stats_poll.data = (unsigned long) dev;
	np->stats_poll.function = &nv_do_stats_poll;	/* timer handler */

	err = pci_enable_device(pci_dev);
	if (err)
		goto out_free;

	pci_set_master(pci_dev);

	err = pci_request_regions(pci_dev, DRV_NAME);
	if (err < 0)
		goto out_disable;

	if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2))
		np->register_size = NV_PCI_REGSZ_VER3;
	else if (id->driver_data & DEV_HAS_STATISTICS_V1)
		np->register_size = NV_PCI_REGSZ_VER2;
	else
		np->register_size = NV_PCI_REGSZ_VER1;

	err = -EINVAL;
	addr = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
			pci_name(pci_dev), i, (void *)pci_resource_start(pci_dev, i),
			pci_resource_len(pci_dev, i),
			pci_resource_flags(pci_dev, i));
		if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
		    pci_resource_len(pci_dev, i) >= np->register_size) {
			addr = pci_resource_start(pci_dev, i);
			break;
		}
	}
	if (i == DEVICE_COUNT_RESOURCE) {
		dev_printk(KERN_INFO, &pci_dev->dev,
			   "Couldn't find register window\n");
		goto out_relreg;
	}

	/* copy of driver data */
	np->driver_data = id->driver_data;

	/* handle different descriptor versions */
	if (id->driver_data & DEV_HAS_HIGH_DMA) {
		/* packet format 3: supports 40-bit addressing */
		np->desc_ver = DESC_VER_3;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
		if (dma_64bit) {
			if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK))
				dev_printk(KERN_INFO, &pci_dev->dev,
					   "64-bit DMA failed, using 32-bit addressing\n");
			else
				dev->features |= NETIF_F_HIGHDMA;
			if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) {
				dev_printk(KERN_INFO, &pci_dev->dev,
					   "64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
			}
		}
	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
		/* packet format 2: supports jumbo frames */
		np->desc_ver = DESC_VER_2;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
	} else {
		/* original packet format */
		np->desc_ver = DESC_VER_1;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
	}

	np->pkt_limit = NV_PKTLIMIT_1;
	if (id->driver_data & DEV_HAS_LARGEDESC)
		np->pkt_limit = NV_PKTLIMIT_2;

	if (id->driver_data & DEV_HAS_CHECKSUM) {
		np->rx_csum = 1;
		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
		dev->features |= NETIF_F_TSO;
	}

	np->vlanctl_bits = 0;
	if (id->driver_data & DEV_HAS_VLAN) {
		np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
		dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
		dev->vlan_rx_register = nv_vlan_rx_register;
	}

	np->msi_flags = 0;
	if ((id->driver_data & DEV_HAS_MSI) && msi) {
		np->msi_flags |= NV_MSI_CAPABLE;
	}
	if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
		np->msi_flags |= NV_MSI_X_CAPABLE;
	}

	np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
	if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) {
		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
	}
	err = -ENOMEM;
	np->base = ioremap(addr, np->register_size);
	if (!np->base)
		goto out_relreg;
	dev->base_addr = (unsigned long)np->base;

	dev->irq = pci_dev->irq;

	np->rx_ring_size = RX_RING_DEFAULT;
	np->tx_ring_size = TX_RING_DEFAULT;

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->rx_ring.orig = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.orig)
			goto out_unmap;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.ex)
			goto out_unmap;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
	np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
	np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
	if (!np->rx_skb || !np->tx_skb)
		goto out_freering;

	dev->open = nv_open;
	dev->stop = nv_close;
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		dev->hard_start_xmit = nv_start_xmit;
	else
		dev->hard_start_xmit = nv_start_xmit_optimized;
	dev->get_stats = nv_get_stats;
	dev->change_mtu = nv_change_mtu;
	dev->set_mac_address = nv_set_mac_address;
	dev->set_multicast_list = nv_set_multicast;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = nv_poll_controller;
#endif
#ifdef CONFIG_FORCEDETH_NAPI
	netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
#endif
	SET_ETHTOOL_OPS(dev, &ops);
	dev->tx_timeout = nv_tx_timeout;
	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;

	pci_set_drvdata(pci_dev, dev);

	/* read the mac address */
	base = get_hwbase(dev);
	np->orig_mac[0] = readl(base + NvRegMacAddrA);
	np->orig_mac[1] = readl(base + NvRegMacAddrB);

	/* check the workaround bit for correct mac address order */
	txreg = readl(base + NvRegTransmitPoll);
	if ((txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) ||
	    (id->driver_data & DEV_HAS_CORRECT_MACADDR)) {
		/* mac address is already in correct order */
		dev->dev_addr[0] = (np->orig_mac[0] >>  0) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[0] >>  8) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[1] >>  0) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[1] >>  8) & 0xff;
	} else {
		/* need to reverse mac address to correct order */
		dev->dev_addr[0] = (np->orig_mac[1] >>  8) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[1] >>  0) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[0] >>  8) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[0] >>  0) & 0xff;
		writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	if (!is_valid_ether_addr(dev->perm_addr)) {
		/*
		 * Bad mac address. At least one bios sets the mac address
		 * to 01:23:45:67:89:ab
		 */
		dev_printk(KERN_ERR, &pci_dev->dev,
			   "Invalid Mac address detected: %s\n",
			   print_mac(mac, dev->dev_addr));
		dev_printk(KERN_ERR, &pci_dev->dev,
			   "Please complain to your hardware vendor. Switching to a random MAC.\n");
		dev->dev_addr[0] = 0x00;
		dev->dev_addr[1] = 0x00;
		dev->dev_addr[2] = 0x6c;
		get_random_bytes(&dev->dev_addr[3], 3);
	}

	dprintk(KERN_DEBUG "%s: MAC Address %s\n",
		pci_name(pci_dev), print_mac(mac, dev->dev_addr));

	/* set mac address */
	nv_copy_mac_to_hw(dev);

	/* disable WOL */
	writel(0, base + NvRegWakeUpFlags);
	np->wolenabled = 0;
	if (id->driver_data & DEV_HAS_POWER_CNTRL) {

		/* take phy and nic out of low power mode */
		powerstate = readl(base + NvRegPowerState2);
		powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
		if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
		    pci_dev->revision >= 0xA3)
			powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
		writel(powerstate, base + NvRegPowerState2);
	}

	if (np->desc_ver == DESC_VER_1) {
		np->tx_flags = NV_TX_VALID;
	} else {
		np->tx_flags = NV_TX2_VALID;
	}
	if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0003;
	} else {
		np->irqmask = NVREG_IRQMASK_CPU;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0001;
	}

	if (id->driver_data & DEV_NEED_TIMERIRQ)
		np->irqmask |= NVREG_IRQ_TIMER;
	if (id->driver_data & DEV_NEED_LINKTIMER) {
		dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
		np->need_linktimer = 1;
		np->link_timeout = jiffies + LINK_TIMEOUT;
	} else {
		dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
		np->need_linktimer = 0;
	}

	/* clear phy state and temporarily halt phy interrupts */
	writel(0, base + NvRegMIIMask);
	phystate = readl(base + NvRegAdapterControl);
	if (phystate & NVREG_ADAPTCTL_RUNNING) {
		phystate_orig = 1;
		phystate &= ~NVREG_ADAPTCTL_RUNNING;
		writel(phystate, base + NvRegAdapterControl);
	}
	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);

	if (id->driver_data & DEV_HAS_MGMT_UNIT) {
		/* management unit running on the mac? */
		if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) {
			np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
			dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use);
			if (nv_mgmt_acquire_sema(dev)) {
				/* management unit setup the phy already? */
				if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
				    NVREG_XMITCTL_SYNC_PHY_INIT) {
					/* phy is inited by mgmt unit */
					phyinitialized = 1;
					dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev));
				} else {
					/* we need to init the phy */
				}
			}
		}
	}

	/* find a suitable phy */
	for (i = 1; i <= 32; i++) {
		int id1, id2;
		int phyaddr = i & 0x1F;

		spin_lock_irq(&np->lock);
		id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id1 < 0 || id1 == 0xffff)
			continue;
		spin_lock_irq(&np->lock);
		id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id2 < 0 || id2 == 0xffff)
			continue;

		np->phy_model = id2 & PHYID2_MODEL_MASK;
		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
		dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
			pci_name(pci_dev), id1, id2, phyaddr);
		np->phyaddr = phyaddr;
		np->phy_oui = id1 | id2;
		break;
	}
	if (i == 33) {
		dev_printk(KERN_INFO, &pci_dev->dev,
			   "open: Could not find a valid PHY.\n");
		goto out_error;
	}

	if (!phyinitialized) {
		/* reset it */
		phy_init(dev);
	} else {
		/* see if it is a gigabit phy */
		u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
		if (mii_status & PHY_GIGABIT) {
			np->gigabit = PHY_GIGABIT;
		}
	}

	/* set default link speed settings */
	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
	np->duplex = 0;
	np->autoneg = 1;

	err = register_netdev(dev);
	if (err) {
		dev_printk(KERN_INFO, &pci_dev->dev,
			   "unable to register netdev: %d\n", err);
		goto out_error;
	}

	dev_printk(KERN_INFO, &pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, "
		   "addr %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
		   dev->name,
		   np->phy_oui,
		   np->phyaddr,
		   dev->dev_addr[0],
		   dev->dev_addr[1],
		   dev->dev_addr[2],
		   dev->dev_addr[3],
		   dev->dev_addr[4],
		   dev->dev_addr[5]);

	dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
		   dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
		   dev->features & (NETIF_F_HW_CSUM | NETIF_F_SG) ?
		   "csum " : "",
		   dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
		   "vlan " : "",
		   id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
		   id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
		   id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
		   np->gigabit == PHY_GIGABIT ? "gbit " : "",
		   np->need_linktimer ? "lnktim " : "",
		   np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
		   np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
		   np->desc_ver);

	return 0;

out_error:
	if (phystate_orig)
		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
	pci_set_drvdata(pci_dev, NULL);
out_freering:
	free_rings(dev);
out_unmap:
	iounmap(get_hwbase(dev));
out_relreg:
	pci_release_regions(pci_dev);
out_disable:
	pci_disable_device(pci_dev);
out_free:
	free_netdev(dev);
out:
	return err;
}
static void __devexit nv_remove(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	unregister_netdev(dev);

	/* special op: write back the misordered MAC address - otherwise
	 * the next nv_probe would see a wrong address.
	 */
	writel(np->orig_mac[0], base + NvRegMacAddrA);
	writel(np->orig_mac[1], base + NvRegMacAddrB);
	writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
	       base + NvRegTransmitPoll);

	/* free all structures */
	free_rings(dev);
	iounmap(get_hwbase(dev));
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
	free_netdev(dev);
	pci_set_drvdata(pci_dev, NULL);
}
5430 #ifdef CONFIG_PM
static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);

	if (!netif_running(dev))
		goto out;

	netif_device_detach(dev);

	// Gross.
	nv_close(dev);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
out:
	return 0;
}

static int nv_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	int rc = 0;

	if (!netif_running(dev))
		goto out;

	netif_device_attach(dev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);

	rc = nv_open(dev);
out:
	return rc;
}
#else
#define nv_suspend NULL
#define nv_resume NULL
#endif /* CONFIG_PM */
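
/* PCI device table: one entry per supported NVIDIA MAC; the driver_data
 * flag bits describe each chipset's feature set and quirks. */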
static struct pci_device_id pci_tbl[] = {
	{	/* nForce Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce2 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{0,},
};
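
/* Glue the probe/remove and power-management entry points into the PCI core. */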
static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = pci_tbl,
	.probe = nv_probe,
	.remove = __devexit_p(nv_remove),
	.suspend = nv_suspend,
	.resume	= nv_resume,
};

static int __init init_nic(void)
{
	return pci_register_driver(&driver);
}

static void __exit exit_nic(void)
{
	pci_unregister_driver(&driver);
}
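
/* All module parameters below are registered with sysfs permission 0: they
 * are set on the module command line and are not exposed under /sys/module/. */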
module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "Interval determines how frequently the timer interrupt is generated, as [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and max is 65535.");
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(msix, int, 0);
MODULE_PARM_DESC(msix, "MSI-X interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
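
/*
 * Example (hypothetical values): load the driver in CPU/timer mode with MSI
 * enabled and a custom poll interval:
 *
 *	modprobe forcedeth optimization_mode=1 poll_interval=970 msi=1
 */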

MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(pci, pci_tbl);

module_init(init_nic);
module_exit(exit_nic);