befs: annotate fs32 on tests for superblock endianness
[linux-2.6/mini2440.git] / drivers / net / forcedeth.c
blobcc7328b1552136f0fa7c8e8f5587b61e110f216b
1 /*
2 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
4 * Note: This driver is a cleanroom reimplementation based on reverse
5 * engineered documentation written by Carl-Daniel Hailfinger
6 * and Andrew de Quincey.
8 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
9 * trademarks of NVIDIA Corporation in the United States and other
10 * countries.
12 * Copyright (C) 2003,4,5 Manfred Spraul
13 * Copyright (C) 2004 Andrew de Quincey (wol support)
14 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
15 * IRQ rate fixes, bigendian fixes, cleanups, verification)
16 * Copyright (c) 2004,2005,2006,2007,2008 NVIDIA Corporation
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2 of the License, or
21 * (at your option) any later version.
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
32 * Known bugs:
33 * We suspect that on some hardware no TX done interrupts are generated.
34 * This means recovery from netif_stop_queue only happens if the hw timer
35 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
36 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
37 * If your hardware reliably generates tx done interrupts, then you can remove
38 * DEV_NEED_TIMERIRQ from the driver_data flags.
39 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
40 * superfluous timer interrupts from the nic.
42 #define FORCEDETH_VERSION "0.61"
43 #define DRV_NAME "forcedeth"
45 #include <linux/module.h>
46 #include <linux/types.h>
47 #include <linux/pci.h>
48 #include <linux/interrupt.h>
49 #include <linux/netdevice.h>
50 #include <linux/etherdevice.h>
51 #include <linux/delay.h>
52 #include <linux/spinlock.h>
53 #include <linux/ethtool.h>
54 #include <linux/timer.h>
55 #include <linux/skbuff.h>
56 #include <linux/mii.h>
57 #include <linux/random.h>
58 #include <linux/init.h>
59 #include <linux/if_vlan.h>
60 #include <linux/dma-mapping.h>
62 #include <asm/irq.h>
63 #include <asm/io.h>
64 #include <asm/uaccess.h>
65 #include <asm/system.h>
67 #if 0
68 #define dprintk printk
69 #else
70 #define dprintk(x...) do { } while (0)
71 #endif
73 #define TX_WORK_PER_LOOP 64
74 #define RX_WORK_PER_LOOP 64
77 * Hardware access:
80 #define DEV_NEED_TIMERIRQ 0x000001 /* set the timer irq flag in the irq mask */
81 #define DEV_NEED_LINKTIMER 0x000002 /* poll link settings. Relies on the timer irq */
82 #define DEV_HAS_LARGEDESC 0x000004 /* device supports jumbo frames and needs packet format 2 */
83 #define DEV_HAS_HIGH_DMA 0x000008 /* device supports 64bit dma */
84 #define DEV_HAS_CHECKSUM 0x000010 /* device supports tx and rx checksum offloads */
85 #define DEV_HAS_VLAN 0x000020 /* device supports vlan tagging and striping */
86 #define DEV_HAS_MSI 0x000040 /* device supports MSI */
87 #define DEV_HAS_MSI_X 0x000080 /* device supports MSI-X */
88 #define DEV_HAS_POWER_CNTRL 0x000100 /* device supports power savings */
89 #define DEV_HAS_STATISTICS_V1 0x000200 /* device supports hw statistics version 1 */
90 #define DEV_HAS_STATISTICS_V2 0x000400 /* device supports hw statistics version 2 */
91 #define DEV_HAS_STATISTICS_V3 0x000800 /* device supports hw statistics version 3 */
92 #define DEV_HAS_TEST_EXTENDED 0x001000 /* device supports extended diagnostic test */
93 #define DEV_HAS_MGMT_UNIT 0x002000 /* device supports management unit */
94 #define DEV_HAS_CORRECT_MACADDR 0x004000 /* device supports correct mac address order */
95 #define DEV_HAS_COLLISION_FIX 0x008000 /* device supports tx collision fix */
96 #define DEV_HAS_PAUSEFRAME_TX_V1 0x010000 /* device supports tx pause frames version 1 */
97 #define DEV_HAS_PAUSEFRAME_TX_V2 0x020000 /* device supports tx pause frames version 2 */
98 #define DEV_HAS_PAUSEFRAME_TX_V3 0x040000 /* device supports tx pause frames version 3 */
99 #define DEV_NEED_TX_LIMIT 0x080000 /* device needs to limit tx */
100 #define DEV_HAS_GEAR_MODE 0x100000 /* device supports gear mode */
102 enum {
103 NvRegIrqStatus = 0x000,
104 #define NVREG_IRQSTAT_MIIEVENT 0x040
105 #define NVREG_IRQSTAT_MASK 0x81ff
106 NvRegIrqMask = 0x004,
107 #define NVREG_IRQ_RX_ERROR 0x0001
108 #define NVREG_IRQ_RX 0x0002
109 #define NVREG_IRQ_RX_NOBUF 0x0004
110 #define NVREG_IRQ_TX_ERR 0x0008
111 #define NVREG_IRQ_TX_OK 0x0010
112 #define NVREG_IRQ_TIMER 0x0020
113 #define NVREG_IRQ_LINK 0x0040
114 #define NVREG_IRQ_RX_FORCED 0x0080
115 #define NVREG_IRQ_TX_FORCED 0x0100
116 #define NVREG_IRQ_RECOVER_ERROR 0x8000
117 #define NVREG_IRQMASK_THROUGHPUT 0x00df
118 #define NVREG_IRQMASK_CPU 0x0060
119 #define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
120 #define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
121 #define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)
123 #define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
124 NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
125 NVREG_IRQ_TX_FORCED|NVREG_IRQ_RECOVER_ERROR))
127 NvRegUnknownSetupReg6 = 0x008,
128 #define NVREG_UNKSETUP6_VAL 3
131 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
132 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
134 NvRegPollingInterval = 0x00c,
135 #define NVREG_POLL_DEFAULT_THROUGHPUT 970 /* backup tx cleanup if loop max reached */
136 #define NVREG_POLL_DEFAULT_CPU 13
137 NvRegMSIMap0 = 0x020,
138 NvRegMSIMap1 = 0x024,
139 NvRegMSIIrqMask = 0x030,
140 #define NVREG_MSI_VECTOR_0_ENABLED 0x01
141 NvRegMisc1 = 0x080,
142 #define NVREG_MISC1_PAUSE_TX 0x01
143 #define NVREG_MISC1_HD 0x02
144 #define NVREG_MISC1_FORCE 0x3b0f3c
146 NvRegMacReset = 0x34,
147 #define NVREG_MAC_RESET_ASSERT 0x0F3
148 NvRegTransmitterControl = 0x084,
149 #define NVREG_XMITCTL_START 0x01
150 #define NVREG_XMITCTL_MGMT_ST 0x40000000
151 #define NVREG_XMITCTL_SYNC_MASK 0x000f0000
152 #define NVREG_XMITCTL_SYNC_NOT_READY 0x0
153 #define NVREG_XMITCTL_SYNC_PHY_INIT 0x00040000
154 #define NVREG_XMITCTL_MGMT_SEMA_MASK 0x00000f00
155 #define NVREG_XMITCTL_MGMT_SEMA_FREE 0x0
156 #define NVREG_XMITCTL_HOST_SEMA_MASK 0x0000f000
157 #define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000
158 #define NVREG_XMITCTL_HOST_LOADED 0x00004000
159 #define NVREG_XMITCTL_TX_PATH_EN 0x01000000
160 NvRegTransmitterStatus = 0x088,
161 #define NVREG_XMITSTAT_BUSY 0x01
163 NvRegPacketFilterFlags = 0x8c,
164 #define NVREG_PFF_PAUSE_RX 0x08
165 #define NVREG_PFF_ALWAYS 0x7F0000
166 #define NVREG_PFF_PROMISC 0x80
167 #define NVREG_PFF_MYADDR 0x20
168 #define NVREG_PFF_LOOPBACK 0x10
170 NvRegOffloadConfig = 0x90,
171 #define NVREG_OFFLOAD_HOMEPHY 0x601
172 #define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE
173 NvRegReceiverControl = 0x094,
174 #define NVREG_RCVCTL_START 0x01
175 #define NVREG_RCVCTL_RX_PATH_EN 0x01000000
176 NvRegReceiverStatus = 0x98,
177 #define NVREG_RCVSTAT_BUSY 0x01
179 NvRegSlotTime = 0x9c,
180 #define NVREG_SLOTTIME_LEGBF_ENABLED 0x80000000
181 #define NVREG_SLOTTIME_10_100_FULL 0x00007f00
182 #define NVREG_SLOTTIME_1000_FULL 0x0003ff00
183 #define NVREG_SLOTTIME_HALF 0x0000ff00
184 #define NVREG_SLOTTIME_DEFAULT 0x00007f00
185 #define NVREG_SLOTTIME_MASK 0x000000ff
187 NvRegTxDeferral = 0xA0,
188 #define NVREG_TX_DEFERRAL_DEFAULT 0x15050f
189 #define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f
190 #define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f
191 #define NVREG_TX_DEFERRAL_RGMII_STRETCH_10 0x16190f
192 #define NVREG_TX_DEFERRAL_RGMII_STRETCH_100 0x16300f
193 #define NVREG_TX_DEFERRAL_MII_STRETCH 0x152000
194 NvRegRxDeferral = 0xA4,
195 #define NVREG_RX_DEFERRAL_DEFAULT 0x16
196 NvRegMacAddrA = 0xA8,
197 NvRegMacAddrB = 0xAC,
198 NvRegMulticastAddrA = 0xB0,
199 #define NVREG_MCASTADDRA_FORCE 0x01
200 NvRegMulticastAddrB = 0xB4,
201 NvRegMulticastMaskA = 0xB8,
202 #define NVREG_MCASTMASKA_NONE 0xffffffff
203 NvRegMulticastMaskB = 0xBC,
204 #define NVREG_MCASTMASKB_NONE 0xffff
206 NvRegPhyInterface = 0xC0,
207 #define PHY_RGMII 0x10000000
208 NvRegBackOffControl = 0xC4,
209 #define NVREG_BKOFFCTRL_DEFAULT 0x70000000
210 #define NVREG_BKOFFCTRL_SEED_MASK 0x000003ff
211 #define NVREG_BKOFFCTRL_SELECT 24
212 #define NVREG_BKOFFCTRL_GEAR 12
214 NvRegTxRingPhysAddr = 0x100,
215 NvRegRxRingPhysAddr = 0x104,
216 NvRegRingSizes = 0x108,
217 #define NVREG_RINGSZ_TXSHIFT 0
218 #define NVREG_RINGSZ_RXSHIFT 16
219 NvRegTransmitPoll = 0x10c,
220 #define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000
221 NvRegLinkSpeed = 0x110,
222 #define NVREG_LINKSPEED_FORCE 0x10000
223 #define NVREG_LINKSPEED_10 1000
224 #define NVREG_LINKSPEED_100 100
225 #define NVREG_LINKSPEED_1000 50
226 #define NVREG_LINKSPEED_MASK (0xFFF)
227 NvRegUnknownSetupReg5 = 0x130,
228 #define NVREG_UNKSETUP5_BIT31 (1<<31)
229 NvRegTxWatermark = 0x13c,
230 #define NVREG_TX_WM_DESC1_DEFAULT 0x0200010
231 #define NVREG_TX_WM_DESC2_3_DEFAULT 0x1e08000
232 #define NVREG_TX_WM_DESC2_3_1000 0xfe08000
233 NvRegTxRxControl = 0x144,
234 #define NVREG_TXRXCTL_KICK 0x0001
235 #define NVREG_TXRXCTL_BIT1 0x0002
236 #define NVREG_TXRXCTL_BIT2 0x0004
237 #define NVREG_TXRXCTL_IDLE 0x0008
238 #define NVREG_TXRXCTL_RESET 0x0010
239 #define NVREG_TXRXCTL_RXCHECK 0x0400
240 #define NVREG_TXRXCTL_DESC_1 0
241 #define NVREG_TXRXCTL_DESC_2 0x002100
242 #define NVREG_TXRXCTL_DESC_3 0xc02200
243 #define NVREG_TXRXCTL_VLANSTRIP 0x00040
244 #define NVREG_TXRXCTL_VLANINS 0x00080
245 NvRegTxRingPhysAddrHigh = 0x148,
246 NvRegRxRingPhysAddrHigh = 0x14C,
247 NvRegTxPauseFrame = 0x170,
248 #define NVREG_TX_PAUSEFRAME_DISABLE 0x0fff0080
249 #define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010
250 #define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0
251 #define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880
252 NvRegTxPauseFrameLimit = 0x174,
253 #define NVREG_TX_PAUSEFRAMELIMIT_ENABLE 0x00010000
254 NvRegMIIStatus = 0x180,
255 #define NVREG_MIISTAT_ERROR 0x0001
256 #define NVREG_MIISTAT_LINKCHANGE 0x0008
257 #define NVREG_MIISTAT_MASK_RW 0x0007
258 #define NVREG_MIISTAT_MASK_ALL 0x000f
259 NvRegMIIMask = 0x184,
260 #define NVREG_MII_LINKCHANGE 0x0008
262 NvRegAdapterControl = 0x188,
263 #define NVREG_ADAPTCTL_START 0x02
264 #define NVREG_ADAPTCTL_LINKUP 0x04
265 #define NVREG_ADAPTCTL_PHYVALID 0x40000
266 #define NVREG_ADAPTCTL_RUNNING 0x100000
267 #define NVREG_ADAPTCTL_PHYSHIFT 24
268 NvRegMIISpeed = 0x18c,
269 #define NVREG_MIISPEED_BIT8 (1<<8)
270 #define NVREG_MIIDELAY 5
271 NvRegMIIControl = 0x190,
272 #define NVREG_MIICTL_INUSE 0x08000
273 #define NVREG_MIICTL_WRITE 0x00400
274 #define NVREG_MIICTL_ADDRSHIFT 5
275 NvRegMIIData = 0x194,
276 NvRegTxUnicast = 0x1a0,
277 NvRegTxMulticast = 0x1a4,
278 NvRegTxBroadcast = 0x1a8,
279 NvRegWakeUpFlags = 0x200,
280 #define NVREG_WAKEUPFLAGS_VAL 0x7770
281 #define NVREG_WAKEUPFLAGS_BUSYSHIFT 24
282 #define NVREG_WAKEUPFLAGS_ENABLESHIFT 16
283 #define NVREG_WAKEUPFLAGS_D3SHIFT 12
284 #define NVREG_WAKEUPFLAGS_D2SHIFT 8
285 #define NVREG_WAKEUPFLAGS_D1SHIFT 4
286 #define NVREG_WAKEUPFLAGS_D0SHIFT 0
287 #define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT 0x01
288 #define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT 0x02
289 #define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE 0x04
290 #define NVREG_WAKEUPFLAGS_ENABLE 0x1111
292 NvRegPatternCRC = 0x204,
293 NvRegPatternMask = 0x208,
294 NvRegPowerCap = 0x268,
295 #define NVREG_POWERCAP_D3SUPP (1<<30)
296 #define NVREG_POWERCAP_D2SUPP (1<<26)
297 #define NVREG_POWERCAP_D1SUPP (1<<25)
298 NvRegPowerState = 0x26c,
299 #define NVREG_POWERSTATE_POWEREDUP 0x8000
300 #define NVREG_POWERSTATE_VALID 0x0100
301 #define NVREG_POWERSTATE_MASK 0x0003
302 #define NVREG_POWERSTATE_D0 0x0000
303 #define NVREG_POWERSTATE_D1 0x0001
304 #define NVREG_POWERSTATE_D2 0x0002
305 #define NVREG_POWERSTATE_D3 0x0003
306 NvRegTxCnt = 0x280,
307 NvRegTxZeroReXmt = 0x284,
308 NvRegTxOneReXmt = 0x288,
309 NvRegTxManyReXmt = 0x28c,
310 NvRegTxLateCol = 0x290,
311 NvRegTxUnderflow = 0x294,
312 NvRegTxLossCarrier = 0x298,
313 NvRegTxExcessDef = 0x29c,
314 NvRegTxRetryErr = 0x2a0,
315 NvRegRxFrameErr = 0x2a4,
316 NvRegRxExtraByte = 0x2a8,
317 NvRegRxLateCol = 0x2ac,
318 NvRegRxRunt = 0x2b0,
319 NvRegRxFrameTooLong = 0x2b4,
320 NvRegRxOverflow = 0x2b8,
321 NvRegRxFCSErr = 0x2bc,
322 NvRegRxFrameAlignErr = 0x2c0,
323 NvRegRxLenErr = 0x2c4,
324 NvRegRxUnicast = 0x2c8,
325 NvRegRxMulticast = 0x2cc,
326 NvRegRxBroadcast = 0x2d0,
327 NvRegTxDef = 0x2d4,
328 NvRegTxFrame = 0x2d8,
329 NvRegRxCnt = 0x2dc,
330 NvRegTxPause = 0x2e0,
331 NvRegRxPause = 0x2e4,
332 NvRegRxDropFrame = 0x2e8,
333 NvRegVlanControl = 0x300,
334 #define NVREG_VLANCONTROL_ENABLE 0x2000
335 NvRegMSIXMap0 = 0x3e0,
336 NvRegMSIXMap1 = 0x3e4,
337 NvRegMSIXIrqStatus = 0x3f0,
339 NvRegPowerState2 = 0x600,
340 #define NVREG_POWERSTATE2_POWERUP_MASK 0x0F15
341 #define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001
342 #define NVREG_POWERSTATE2_PHY_RESET 0x0004
345 /* Big endian: should work, but is untested */
346 struct ring_desc {
347 __le32 buf;
348 __le32 flaglen;
351 struct ring_desc_ex {
352 __le32 bufhigh;
353 __le32 buflow;
354 __le32 txvlan;
355 __le32 flaglen;
358 union ring_type {
359 struct ring_desc* orig;
360 struct ring_desc_ex* ex;
363 #define FLAG_MASK_V1 0xffff0000
364 #define FLAG_MASK_V2 0xffffc000
365 #define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
366 #define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
368 #define NV_TX_LASTPACKET (1<<16)
369 #define NV_TX_RETRYERROR (1<<19)
370 #define NV_TX_RETRYCOUNT_MASK (0xF<<20)
371 #define NV_TX_FORCED_INTERRUPT (1<<24)
372 #define NV_TX_DEFERRED (1<<26)
373 #define NV_TX_CARRIERLOST (1<<27)
374 #define NV_TX_LATECOLLISION (1<<28)
375 #define NV_TX_UNDERFLOW (1<<29)
376 #define NV_TX_ERROR (1<<30)
377 #define NV_TX_VALID (1<<31)
379 #define NV_TX2_LASTPACKET (1<<29)
380 #define NV_TX2_RETRYERROR (1<<18)
381 #define NV_TX2_RETRYCOUNT_MASK (0xF<<19)
382 #define NV_TX2_FORCED_INTERRUPT (1<<30)
383 #define NV_TX2_DEFERRED (1<<25)
384 #define NV_TX2_CARRIERLOST (1<<26)
385 #define NV_TX2_LATECOLLISION (1<<27)
386 #define NV_TX2_UNDERFLOW (1<<28)
387 /* error and valid are the same for both */
388 #define NV_TX2_ERROR (1<<30)
389 #define NV_TX2_VALID (1<<31)
390 #define NV_TX2_TSO (1<<28)
391 #define NV_TX2_TSO_SHIFT 14
392 #define NV_TX2_TSO_MAX_SHIFT 14
393 #define NV_TX2_TSO_MAX_SIZE (1<<NV_TX2_TSO_MAX_SHIFT)
394 #define NV_TX2_CHECKSUM_L3 (1<<27)
395 #define NV_TX2_CHECKSUM_L4 (1<<26)
397 #define NV_TX3_VLAN_TAG_PRESENT (1<<18)
399 #define NV_RX_DESCRIPTORVALID (1<<16)
400 #define NV_RX_MISSEDFRAME (1<<17)
401 #define NV_RX_SUBSTRACT1 (1<<18)
402 #define NV_RX_ERROR1 (1<<23)
403 #define NV_RX_ERROR2 (1<<24)
404 #define NV_RX_ERROR3 (1<<25)
405 #define NV_RX_ERROR4 (1<<26)
406 #define NV_RX_CRCERR (1<<27)
407 #define NV_RX_OVERFLOW (1<<28)
408 #define NV_RX_FRAMINGERR (1<<29)
409 #define NV_RX_ERROR (1<<30)
410 #define NV_RX_AVAIL (1<<31)
411 #define NV_RX_ERROR_MASK (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)
413 #define NV_RX2_CHECKSUMMASK (0x1C000000)
414 #define NV_RX2_CHECKSUM_IP (0x10000000)
415 #define NV_RX2_CHECKSUM_IP_TCP (0x14000000)
416 #define NV_RX2_CHECKSUM_IP_UDP (0x18000000)
417 #define NV_RX2_DESCRIPTORVALID (1<<29)
418 #define NV_RX2_SUBSTRACT1 (1<<25)
419 #define NV_RX2_ERROR1 (1<<18)
420 #define NV_RX2_ERROR2 (1<<19)
421 #define NV_RX2_ERROR3 (1<<20)
422 #define NV_RX2_ERROR4 (1<<21)
423 #define NV_RX2_CRCERR (1<<22)
424 #define NV_RX2_OVERFLOW (1<<23)
425 #define NV_RX2_FRAMINGERR (1<<24)
426 /* error and avail are the same for both */
427 #define NV_RX2_ERROR (1<<30)
428 #define NV_RX2_AVAIL (1<<31)
429 #define NV_RX2_ERROR_MASK (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)
431 #define NV_RX3_VLAN_TAG_PRESENT (1<<16)
432 #define NV_RX3_VLAN_TAG_MASK (0x0000FFFF)
434 /* Miscelaneous hardware related defines: */
435 #define NV_PCI_REGSZ_VER1 0x270
436 #define NV_PCI_REGSZ_VER2 0x2d4
437 #define NV_PCI_REGSZ_VER3 0x604
438 #define NV_PCI_REGSZ_MAX 0x604
440 /* various timeout delays: all in usec */
441 #define NV_TXRX_RESET_DELAY 4
442 #define NV_TXSTOP_DELAY1 10
443 #define NV_TXSTOP_DELAY1MAX 500000
444 #define NV_TXSTOP_DELAY2 100
445 #define NV_RXSTOP_DELAY1 10
446 #define NV_RXSTOP_DELAY1MAX 500000
447 #define NV_RXSTOP_DELAY2 100
448 #define NV_SETUP5_DELAY 5
449 #define NV_SETUP5_DELAYMAX 50000
450 #define NV_POWERUP_DELAY 5
451 #define NV_POWERUP_DELAYMAX 5000
452 #define NV_MIIBUSY_DELAY 50
453 #define NV_MIIPHY_DELAY 10
454 #define NV_MIIPHY_DELAYMAX 10000
455 #define NV_MAC_RESET_DELAY 64
457 #define NV_WAKEUPPATTERNS 5
458 #define NV_WAKEUPMASKENTRIES 4
460 /* General driver defaults */
461 #define NV_WATCHDOG_TIMEO (5*HZ)
463 #define RX_RING_DEFAULT 128
464 #define TX_RING_DEFAULT 256
465 #define RX_RING_MIN 128
466 #define TX_RING_MIN 64
467 #define RING_MAX_DESC_VER_1 1024
468 #define RING_MAX_DESC_VER_2_3 16384
470 /* rx/tx mac addr + type + vlan + align + slack*/
471 #define NV_RX_HEADERS (64)
472 /* even more slack. */
473 #define NV_RX_ALLOC_PAD (64)
475 /* maximum mtu size */
476 #define NV_PKTLIMIT_1 ETH_DATA_LEN /* hard limit not known */
477 #define NV_PKTLIMIT_2 9100 /* Actual limit according to NVidia: 9202 */
479 #define OOM_REFILL (1+HZ/20)
480 #define POLL_WAIT (1+HZ/100)
481 #define LINK_TIMEOUT (3*HZ)
482 #define STATS_INTERVAL (10*HZ)
485 * desc_ver values:
486 * The nic supports three different descriptor types:
487 * - DESC_VER_1: Original
488 * - DESC_VER_2: support for jumbo frames.
489 * - DESC_VER_3: 64-bit format.
491 #define DESC_VER_1 1
492 #define DESC_VER_2 2
493 #define DESC_VER_3 3
495 /* PHY defines */
496 #define PHY_OUI_MARVELL 0x5043
497 #define PHY_OUI_CICADA 0x03f1
498 #define PHY_OUI_VITESSE 0x01c1
499 #define PHY_OUI_REALTEK 0x0732
500 #define PHY_OUI_REALTEK2 0x0020
501 #define PHYID1_OUI_MASK 0x03ff
502 #define PHYID1_OUI_SHFT 6
503 #define PHYID2_OUI_MASK 0xfc00
504 #define PHYID2_OUI_SHFT 10
505 #define PHYID2_MODEL_MASK 0x03f0
506 #define PHY_MODEL_REALTEK_8211 0x0110
507 #define PHY_REV_MASK 0x0001
508 #define PHY_REV_REALTEK_8211B 0x0000
509 #define PHY_REV_REALTEK_8211C 0x0001
510 #define PHY_MODEL_REALTEK_8201 0x0200
511 #define PHY_MODEL_MARVELL_E3016 0x0220
512 #define PHY_MARVELL_E3016_INITMASK 0x0300
513 #define PHY_CICADA_INIT1 0x0f000
514 #define PHY_CICADA_INIT2 0x0e00
515 #define PHY_CICADA_INIT3 0x01000
516 #define PHY_CICADA_INIT4 0x0200
517 #define PHY_CICADA_INIT5 0x0004
518 #define PHY_CICADA_INIT6 0x02000
519 #define PHY_VITESSE_INIT_REG1 0x1f
520 #define PHY_VITESSE_INIT_REG2 0x10
521 #define PHY_VITESSE_INIT_REG3 0x11
522 #define PHY_VITESSE_INIT_REG4 0x12
523 #define PHY_VITESSE_INIT_MSK1 0xc
524 #define PHY_VITESSE_INIT_MSK2 0x0180
525 #define PHY_VITESSE_INIT1 0x52b5
526 #define PHY_VITESSE_INIT2 0xaf8a
527 #define PHY_VITESSE_INIT3 0x8
528 #define PHY_VITESSE_INIT4 0x8f8a
529 #define PHY_VITESSE_INIT5 0xaf86
530 #define PHY_VITESSE_INIT6 0x8f86
531 #define PHY_VITESSE_INIT7 0xaf82
532 #define PHY_VITESSE_INIT8 0x0100
533 #define PHY_VITESSE_INIT9 0x8f82
534 #define PHY_VITESSE_INIT10 0x0
535 #define PHY_REALTEK_INIT_REG1 0x1f
536 #define PHY_REALTEK_INIT_REG2 0x19
537 #define PHY_REALTEK_INIT_REG3 0x13
538 #define PHY_REALTEK_INIT_REG4 0x14
539 #define PHY_REALTEK_INIT_REG5 0x18
540 #define PHY_REALTEK_INIT_REG6 0x11
541 #define PHY_REALTEK_INIT_REG7 0x01
542 #define PHY_REALTEK_INIT1 0x0000
543 #define PHY_REALTEK_INIT2 0x8e00
544 #define PHY_REALTEK_INIT3 0x0001
545 #define PHY_REALTEK_INIT4 0xad17
546 #define PHY_REALTEK_INIT5 0xfb54
547 #define PHY_REALTEK_INIT6 0xf5c7
548 #define PHY_REALTEK_INIT7 0x1000
549 #define PHY_REALTEK_INIT8 0x0003
550 #define PHY_REALTEK_INIT9 0x0008
551 #define PHY_REALTEK_INIT10 0x0005
552 #define PHY_REALTEK_INIT11 0x0200
553 #define PHY_REALTEK_INIT_MSK1 0x0003
555 #define PHY_GIGABIT 0x0100
557 #define PHY_TIMEOUT 0x1
558 #define PHY_ERROR 0x2
560 #define PHY_100 0x1
561 #define PHY_1000 0x2
562 #define PHY_HALF 0x100
564 #define NV_PAUSEFRAME_RX_CAPABLE 0x0001
565 #define NV_PAUSEFRAME_TX_CAPABLE 0x0002
566 #define NV_PAUSEFRAME_RX_ENABLE 0x0004
567 #define NV_PAUSEFRAME_TX_ENABLE 0x0008
568 #define NV_PAUSEFRAME_RX_REQ 0x0010
569 #define NV_PAUSEFRAME_TX_REQ 0x0020
570 #define NV_PAUSEFRAME_AUTONEG 0x0040
572 /* MSI/MSI-X defines */
573 #define NV_MSI_X_MAX_VECTORS 8
574 #define NV_MSI_X_VECTORS_MASK 0x000f
575 #define NV_MSI_CAPABLE 0x0010
576 #define NV_MSI_X_CAPABLE 0x0020
577 #define NV_MSI_ENABLED 0x0040
578 #define NV_MSI_X_ENABLED 0x0080
580 #define NV_MSI_X_VECTOR_ALL 0x0
581 #define NV_MSI_X_VECTOR_RX 0x0
582 #define NV_MSI_X_VECTOR_TX 0x1
583 #define NV_MSI_X_VECTOR_OTHER 0x2
585 #define NV_RESTART_TX 0x1
586 #define NV_RESTART_RX 0x2
588 #define NV_TX_LIMIT_COUNT 16
590 /* statistics */
591 struct nv_ethtool_str {
592 char name[ETH_GSTRING_LEN];
595 static const struct nv_ethtool_str nv_estats_str[] = {
596 { "tx_bytes" },
597 { "tx_zero_rexmt" },
598 { "tx_one_rexmt" },
599 { "tx_many_rexmt" },
600 { "tx_late_collision" },
601 { "tx_fifo_errors" },
602 { "tx_carrier_errors" },
603 { "tx_excess_deferral" },
604 { "tx_retry_error" },
605 { "rx_frame_error" },
606 { "rx_extra_byte" },
607 { "rx_late_collision" },
608 { "rx_runt" },
609 { "rx_frame_too_long" },
610 { "rx_over_errors" },
611 { "rx_crc_errors" },
612 { "rx_frame_align_error" },
613 { "rx_length_error" },
614 { "rx_unicast" },
615 { "rx_multicast" },
616 { "rx_broadcast" },
617 { "rx_packets" },
618 { "rx_errors_total" },
619 { "tx_errors_total" },
621 /* version 2 stats */
622 { "tx_deferral" },
623 { "tx_packets" },
624 { "rx_bytes" },
625 { "tx_pause" },
626 { "rx_pause" },
627 { "rx_drop_frame" },
629 /* version 3 stats */
630 { "tx_unicast" },
631 { "tx_multicast" },
632 { "tx_broadcast" }
635 struct nv_ethtool_stats {
636 u64 tx_bytes;
637 u64 tx_zero_rexmt;
638 u64 tx_one_rexmt;
639 u64 tx_many_rexmt;
640 u64 tx_late_collision;
641 u64 tx_fifo_errors;
642 u64 tx_carrier_errors;
643 u64 tx_excess_deferral;
644 u64 tx_retry_error;
645 u64 rx_frame_error;
646 u64 rx_extra_byte;
647 u64 rx_late_collision;
648 u64 rx_runt;
649 u64 rx_frame_too_long;
650 u64 rx_over_errors;
651 u64 rx_crc_errors;
652 u64 rx_frame_align_error;
653 u64 rx_length_error;
654 u64 rx_unicast;
655 u64 rx_multicast;
656 u64 rx_broadcast;
657 u64 rx_packets;
658 u64 rx_errors_total;
659 u64 tx_errors_total;
661 /* version 2 stats */
662 u64 tx_deferral;
663 u64 tx_packets;
664 u64 rx_bytes;
665 u64 tx_pause;
666 u64 rx_pause;
667 u64 rx_drop_frame;
669 /* version 3 stats */
670 u64 tx_unicast;
671 u64 tx_multicast;
672 u64 tx_broadcast;
675 #define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
676 #define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
677 #define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
679 /* diagnostics */
680 #define NV_TEST_COUNT_BASE 3
681 #define NV_TEST_COUNT_EXTENDED 4
683 static const struct nv_ethtool_str nv_etests_str[] = {
684 { "link (online/offline)" },
685 { "register (offline) " },
686 { "interrupt (offline) " },
687 { "loopback (offline) " }
690 struct register_test {
691 __u32 reg;
692 __u32 mask;
695 static const struct register_test nv_registers_test[] = {
696 { NvRegUnknownSetupReg6, 0x01 },
697 { NvRegMisc1, 0x03c },
698 { NvRegOffloadConfig, 0x03ff },
699 { NvRegMulticastAddrA, 0xffffffff },
700 { NvRegTxWatermark, 0x0ff },
701 { NvRegWakeUpFlags, 0x07777 },
702 { 0,0 }
705 struct nv_skb_map {
706 struct sk_buff *skb;
707 dma_addr_t dma;
708 unsigned int dma_len;
709 struct ring_desc_ex *first_tx_desc;
710 struct nv_skb_map *next_tx_ctx;
714 * SMP locking:
715 * All hardware access under dev->priv->lock, except the performance
716 * critical parts:
717 * - rx is (pseudo-) lockless: it relies on the single-threading provided
718 * by the arch code for interrupts.
719 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
720 * needs dev->priv->lock :-(
721 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
724 /* in dev: base, irq */
725 struct fe_priv {
726 spinlock_t lock;
728 struct net_device *dev;
729 struct napi_struct napi;
731 /* General data:
732 * Locking: spin_lock(&np->lock); */
733 struct nv_ethtool_stats estats;
734 int in_shutdown;
735 u32 linkspeed;
736 int duplex;
737 int autoneg;
738 int fixed_mode;
739 int phyaddr;
740 int wolenabled;
741 unsigned int phy_oui;
742 unsigned int phy_model;
743 unsigned int phy_rev;
744 u16 gigabit;
745 int intr_test;
746 int recover_error;
748 /* General data: RO fields */
749 dma_addr_t ring_addr;
750 struct pci_dev *pci_dev;
751 u32 orig_mac[2];
752 u32 irqmask;
753 u32 desc_ver;
754 u32 txrxctl_bits;
755 u32 vlanctl_bits;
756 u32 driver_data;
757 u32 device_id;
758 u32 register_size;
759 int rx_csum;
760 u32 mac_in_use;
762 void __iomem *base;
764 /* rx specific fields.
765 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
767 union ring_type get_rx, put_rx, first_rx, last_rx;
768 struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
769 struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
770 struct nv_skb_map *rx_skb;
772 union ring_type rx_ring;
773 unsigned int rx_buf_sz;
774 unsigned int pkt_limit;
775 struct timer_list oom_kick;
776 struct timer_list nic_poll;
777 struct timer_list stats_poll;
778 u32 nic_poll_irq;
779 int rx_ring_size;
781 /* media detection workaround.
782 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
784 int need_linktimer;
785 unsigned long link_timeout;
787 * tx specific fields.
789 union ring_type get_tx, put_tx, first_tx, last_tx;
790 struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
791 struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
792 struct nv_skb_map *tx_skb;
794 union ring_type tx_ring;
795 u32 tx_flags;
796 int tx_ring_size;
797 int tx_limit;
798 u32 tx_pkts_in_progress;
799 struct nv_skb_map *tx_change_owner;
800 struct nv_skb_map *tx_end_flip;
801 int tx_stop;
803 /* vlan fields */
804 struct vlan_group *vlangrp;
806 /* msi/msi-x fields */
807 u32 msi_flags;
808 struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
810 /* flow control */
811 u32 pause_flags;
813 /* power saved state */
814 u32 saved_config_space[NV_PCI_REGSZ_MAX/4];
818 * Maximum number of loops until we assume that a bit in the irq mask
819 * is stuck. Overridable with module param.
821 static int max_interrupt_work = 5;
824 * Optimization can be either throuput mode or cpu mode
826 * Throughput Mode: Every tx and rx packet will generate an interrupt.
827 * CPU Mode: Interrupts are controlled by a timer.
829 enum {
830 NV_OPTIMIZATION_MODE_THROUGHPUT,
831 NV_OPTIMIZATION_MODE_CPU
833 static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
836 * Poll interval for timer irq
838 * This interval determines how frequent an interrupt is generated.
839 * The is value is determined by [(time_in_micro_secs * 100) / (2^10)]
840 * Min = 0, and Max = 65535
842 static int poll_interval = -1;
845 * MSI interrupts
847 enum {
848 NV_MSI_INT_DISABLED,
849 NV_MSI_INT_ENABLED
851 static int msi = NV_MSI_INT_ENABLED;
854 * MSIX interrupts
856 enum {
857 NV_MSIX_INT_DISABLED,
858 NV_MSIX_INT_ENABLED
860 static int msix = NV_MSIX_INT_DISABLED;
863 * DMA 64bit
865 enum {
866 NV_DMA_64BIT_DISABLED,
867 NV_DMA_64BIT_ENABLED
869 static int dma_64bit = NV_DMA_64BIT_ENABLED;
872 * Crossover Detection
873 * Realtek 8201 phy + some OEM boards do not work properly.
875 enum {
876 NV_CROSSOVER_DETECTION_DISABLED,
877 NV_CROSSOVER_DETECTION_ENABLED
879 static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;
881 static inline struct fe_priv *get_nvpriv(struct net_device *dev)
883 return netdev_priv(dev);
886 static inline u8 __iomem *get_hwbase(struct net_device *dev)
888 return ((struct fe_priv *)netdev_priv(dev))->base;
891 static inline void pci_push(u8 __iomem *base)
893 /* force out pending posted writes */
894 readl(base);
897 static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
899 return le32_to_cpu(prd->flaglen)
900 & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
903 static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
905 return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
908 static bool nv_optimized(struct fe_priv *np)
910 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
911 return false;
912 return true;
915 static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
916 int delay, int delaymax, const char *msg)
918 u8 __iomem *base = get_hwbase(dev);
920 pci_push(base);
921 do {
922 udelay(delay);
923 delaymax -= delay;
924 if (delaymax < 0) {
925 if (msg)
926 printk(msg);
927 return 1;
929 } while ((readl(base + offset) & mask) != target);
930 return 0;
933 #define NV_SETUP_RX_RING 0x01
934 #define NV_SETUP_TX_RING 0x02
936 static inline u32 dma_low(dma_addr_t addr)
938 return addr;
941 static inline u32 dma_high(dma_addr_t addr)
943 return addr>>31>>1; /* 0 if 32bit, shift down by 32 if 64bit */
946 static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
948 struct fe_priv *np = get_nvpriv(dev);
949 u8 __iomem *base = get_hwbase(dev);
951 if (!nv_optimized(np)) {
952 if (rxtx_flags & NV_SETUP_RX_RING) {
953 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
955 if (rxtx_flags & NV_SETUP_TX_RING) {
956 writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
958 } else {
959 if (rxtx_flags & NV_SETUP_RX_RING) {
960 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
961 writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
963 if (rxtx_flags & NV_SETUP_TX_RING) {
964 writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
965 writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
970 static void free_rings(struct net_device *dev)
972 struct fe_priv *np = get_nvpriv(dev);
974 if (!nv_optimized(np)) {
975 if (np->rx_ring.orig)
976 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
977 np->rx_ring.orig, np->ring_addr);
978 } else {
979 if (np->rx_ring.ex)
980 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
981 np->rx_ring.ex, np->ring_addr);
983 if (np->rx_skb)
984 kfree(np->rx_skb);
985 if (np->tx_skb)
986 kfree(np->tx_skb);
989 static int using_multi_irqs(struct net_device *dev)
991 struct fe_priv *np = get_nvpriv(dev);
993 if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
994 ((np->msi_flags & NV_MSI_X_ENABLED) &&
995 ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
996 return 0;
997 else
998 return 1;
1001 static void nv_enable_irq(struct net_device *dev)
1003 struct fe_priv *np = get_nvpriv(dev);
1005 if (!using_multi_irqs(dev)) {
1006 if (np->msi_flags & NV_MSI_X_ENABLED)
1007 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1008 else
1009 enable_irq(np->pci_dev->irq);
1010 } else {
1011 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1012 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
1013 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
1017 static void nv_disable_irq(struct net_device *dev)
1019 struct fe_priv *np = get_nvpriv(dev);
1021 if (!using_multi_irqs(dev)) {
1022 if (np->msi_flags & NV_MSI_X_ENABLED)
1023 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1024 else
1025 disable_irq(np->pci_dev->irq);
1026 } else {
1027 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1028 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
1029 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
1033 /* In MSIX mode, a write to irqmask behaves as XOR */
1034 static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
1036 u8 __iomem *base = get_hwbase(dev);
1038 writel(mask, base + NvRegIrqMask);
1041 static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
1043 struct fe_priv *np = get_nvpriv(dev);
1044 u8 __iomem *base = get_hwbase(dev);
1046 if (np->msi_flags & NV_MSI_X_ENABLED) {
1047 writel(mask, base + NvRegIrqMask);
1048 } else {
1049 if (np->msi_flags & NV_MSI_ENABLED)
1050 writel(0, base + NvRegMSIIrqMask);
1051 writel(0, base + NvRegIrqMask);
1055 #define MII_READ (-1)
1056 /* mii_rw: read/write a register on the PHY.
1058 * Caller must guarantee serialization
1060 static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
1062 u8 __iomem *base = get_hwbase(dev);
1063 u32 reg;
1064 int retval;
1066 writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);
1068 reg = readl(base + NvRegMIIControl);
1069 if (reg & NVREG_MIICTL_INUSE) {
1070 writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
1071 udelay(NV_MIIBUSY_DELAY);
1074 reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
1075 if (value != MII_READ) {
1076 writel(value, base + NvRegMIIData);
1077 reg |= NVREG_MIICTL_WRITE;
1079 writel(reg, base + NvRegMIIControl);
1081 if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
1082 NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
1083 dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
1084 dev->name, miireg, addr);
1085 retval = -1;
1086 } else if (value != MII_READ) {
1087 /* it was a write operation - fewer failures are detectable */
1088 dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
1089 dev->name, value, miireg, addr);
1090 retval = 0;
1091 } else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
1092 dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
1093 dev->name, miireg, addr);
1094 retval = -1;
1095 } else {
1096 retval = readl(base + NvRegMIIData);
1097 dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
1098 dev->name, miireg, addr, retval);
1101 return retval;
1104 static int phy_reset(struct net_device *dev, u32 bmcr_setup)
1106 struct fe_priv *np = netdev_priv(dev);
1107 u32 miicontrol;
1108 unsigned int tries = 0;
1110 miicontrol = BMCR_RESET | bmcr_setup;
1111 if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
1112 return -1;
1115 /* wait for 500ms */
1116 msleep(500);
1118 /* must wait till reset is deasserted */
1119 while (miicontrol & BMCR_RESET) {
1120 msleep(10);
1121 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1122 /* FIXME: 100 tries seem excessive */
1123 if (tries++ > 100)
1124 return -1;
1126 return 0;
1129 static int phy_init(struct net_device *dev)
1131 struct fe_priv *np = get_nvpriv(dev);
1132 u8 __iomem *base = get_hwbase(dev);
1133 u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg;
1135 /* phy errata for E3016 phy */
1136 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
1137 reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1138 reg &= ~PHY_MARVELL_E3016_INITMASK;
1139 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
1140 printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
1141 return PHY_ERROR;
1144 if (np->phy_oui == PHY_OUI_REALTEK) {
1145 if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1146 np->phy_rev == PHY_REV_REALTEK_8211B) {
1147 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
1148 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1149 return PHY_ERROR;
1151 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
1152 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1153 return PHY_ERROR;
1155 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
1156 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1157 return PHY_ERROR;
1159 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
1160 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1161 return PHY_ERROR;
1163 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
1164 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1165 return PHY_ERROR;
1167 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
1168 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1169 return PHY_ERROR;
1171 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
1172 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1173 return PHY_ERROR;
1176 if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1177 np->phy_rev == PHY_REV_REALTEK_8211C) {
1178 u32 powerstate = readl(base + NvRegPowerState2);
1180 /* need to perform hw phy reset */
1181 powerstate |= NVREG_POWERSTATE2_PHY_RESET;
1182 writel(powerstate, base + NvRegPowerState2);
1183 msleep(25);
1185 powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
1186 writel(powerstate, base + NvRegPowerState2);
1187 msleep(25);
1189 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
1190 reg |= PHY_REALTEK_INIT9;
1191 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg)) {
1192 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1193 return PHY_ERROR;
1195 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10)) {
1196 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1197 return PHY_ERROR;
1199 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
1200 if (!(reg & PHY_REALTEK_INIT11)) {
1201 reg |= PHY_REALTEK_INIT11;
1202 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg)) {
1203 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1204 return PHY_ERROR;
1207 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
1208 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1209 return PHY_ERROR;
1212 if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1213 if (np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
1214 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
1215 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
1216 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
1217 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
1218 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
1219 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
1220 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_39) {
1221 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
1222 phy_reserved |= PHY_REALTEK_INIT7;
1223 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
1224 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1225 return PHY_ERROR;
1231 /* set advertise register */
1232 reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
1233 reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
1234 if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
1235 printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
1236 return PHY_ERROR;
1239 /* get phy interface type */
1240 phyinterface = readl(base + NvRegPhyInterface);
1242 /* see if gigabit phy */
1243 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
1244 if (mii_status & PHY_GIGABIT) {
1245 np->gigabit = PHY_GIGABIT;
1246 mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
1247 mii_control_1000 &= ~ADVERTISE_1000HALF;
1248 if (phyinterface & PHY_RGMII)
1249 mii_control_1000 |= ADVERTISE_1000FULL;
1250 else
1251 mii_control_1000 &= ~ADVERTISE_1000FULL;
1253 if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
1254 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1255 return PHY_ERROR;
1258 else
1259 np->gigabit = 0;
1261 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1262 mii_control |= BMCR_ANENABLE;
1264 if (np->phy_oui == PHY_OUI_REALTEK &&
1265 np->phy_model == PHY_MODEL_REALTEK_8211 &&
1266 np->phy_rev == PHY_REV_REALTEK_8211C) {
1267 /* start autoneg since we already performed hw reset above */
1268 mii_control |= BMCR_ANRESTART;
1269 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
1270 printk(KERN_INFO "%s: phy init failed\n", pci_name(np->pci_dev));
1271 return PHY_ERROR;
1273 } else {
1274 /* reset the phy
1275 * (certain phys need bmcr to be setup with reset)
1277 if (phy_reset(dev, mii_control)) {
1278 printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
1279 return PHY_ERROR;
1283 /* phy vendor specific configuration */
1284 if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) {
1285 phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
1286 phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
1287 phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
1288 if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
1289 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1290 return PHY_ERROR;
1292 phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1293 phy_reserved |= PHY_CICADA_INIT5;
1294 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
1295 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1296 return PHY_ERROR;
1299 if (np->phy_oui == PHY_OUI_CICADA) {
1300 phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
1301 phy_reserved |= PHY_CICADA_INIT6;
1302 if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
1303 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1304 return PHY_ERROR;
1307 if (np->phy_oui == PHY_OUI_VITESSE) {
1308 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
1309 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1310 return PHY_ERROR;
1312 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
1313 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1314 return PHY_ERROR;
1316 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
1317 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
1318 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1319 return PHY_ERROR;
1321 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
1322 phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
1323 phy_reserved |= PHY_VITESSE_INIT3;
1324 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
1325 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1326 return PHY_ERROR;
1328 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
1329 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1330 return PHY_ERROR;
1332 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
1333 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1334 return PHY_ERROR;
1336 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
1337 phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
1338 phy_reserved |= PHY_VITESSE_INIT3;
1339 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
1340 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1341 return PHY_ERROR;
1343 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
1344 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
1345 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1346 return PHY_ERROR;
1348 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
1349 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1350 return PHY_ERROR;
1352 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
1353 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1354 return PHY_ERROR;
1356 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
1357 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
1358 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1359 return PHY_ERROR;
1361 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
1362 phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
1363 phy_reserved |= PHY_VITESSE_INIT8;
1364 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
1365 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1366 return PHY_ERROR;
1368 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
1369 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1370 return PHY_ERROR;
1372 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
1373 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1374 return PHY_ERROR;
1377 if (np->phy_oui == PHY_OUI_REALTEK) {
1378 if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1379 np->phy_rev == PHY_REV_REALTEK_8211B) {
1380 /* reset could have cleared these out, set them back */
1381 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
1382 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1383 return PHY_ERROR;
1385 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
1386 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1387 return PHY_ERROR;
1389 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
1390 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1391 return PHY_ERROR;
1393 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
1394 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1395 return PHY_ERROR;
1397 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
1398 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1399 return PHY_ERROR;
1401 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
1402 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1403 return PHY_ERROR;
1405 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
1406 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1407 return PHY_ERROR;
1410 if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1411 if (np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
1412 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
1413 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
1414 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
1415 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
1416 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
1417 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
1418 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_39) {
1419 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
1420 phy_reserved |= PHY_REALTEK_INIT7;
1421 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
1422 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1423 return PHY_ERROR;
1426 if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
1427 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
1428 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1429 return PHY_ERROR;
1431 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
1432 phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
1433 phy_reserved |= PHY_REALTEK_INIT3;
1434 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved)) {
1435 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1436 return PHY_ERROR;
1438 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
1439 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1440 return PHY_ERROR;
1446 /* some phys clear out pause advertisment on reset, set it back */
1447 mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
1449 /* restart auto negotiation */
1450 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1451 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
1452 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
1453 return PHY_ERROR;
1456 return 0;
1459 static void nv_start_rx(struct net_device *dev)
1461 struct fe_priv *np = netdev_priv(dev);
1462 u8 __iomem *base = get_hwbase(dev);
1463 u32 rx_ctrl = readl(base + NvRegReceiverControl);
1465 dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
1466 /* Already running? Stop it. */
1467 if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
1468 rx_ctrl &= ~NVREG_RCVCTL_START;
1469 writel(rx_ctrl, base + NvRegReceiverControl);
1470 pci_push(base);
1472 writel(np->linkspeed, base + NvRegLinkSpeed);
1473 pci_push(base);
1474 rx_ctrl |= NVREG_RCVCTL_START;
1475 if (np->mac_in_use)
1476 rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
1477 writel(rx_ctrl, base + NvRegReceiverControl);
1478 dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
1479 dev->name, np->duplex, np->linkspeed);
1480 pci_push(base);
1483 static void nv_stop_rx(struct net_device *dev)
1485 struct fe_priv *np = netdev_priv(dev);
1486 u8 __iomem *base = get_hwbase(dev);
1487 u32 rx_ctrl = readl(base + NvRegReceiverControl);
1489 dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
1490 if (!np->mac_in_use)
1491 rx_ctrl &= ~NVREG_RCVCTL_START;
1492 else
1493 rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
1494 writel(rx_ctrl, base + NvRegReceiverControl);
1495 reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
1496 NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
1497 KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");
1499 udelay(NV_RXSTOP_DELAY2);
1500 if (!np->mac_in_use)
1501 writel(0, base + NvRegLinkSpeed);
1504 static void nv_start_tx(struct net_device *dev)
1506 struct fe_priv *np = netdev_priv(dev);
1507 u8 __iomem *base = get_hwbase(dev);
1508 u32 tx_ctrl = readl(base + NvRegTransmitterControl);
1510 dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
1511 tx_ctrl |= NVREG_XMITCTL_START;
1512 if (np->mac_in_use)
1513 tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
1514 writel(tx_ctrl, base + NvRegTransmitterControl);
1515 pci_push(base);
1518 static void nv_stop_tx(struct net_device *dev)
1520 struct fe_priv *np = netdev_priv(dev);
1521 u8 __iomem *base = get_hwbase(dev);
1522 u32 tx_ctrl = readl(base + NvRegTransmitterControl);
1524 dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
1525 if (!np->mac_in_use)
1526 tx_ctrl &= ~NVREG_XMITCTL_START;
1527 else
1528 tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
1529 writel(tx_ctrl, base + NvRegTransmitterControl);
1530 reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
1531 NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
1532 KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");
1534 udelay(NV_TXSTOP_DELAY2);
1535 if (!np->mac_in_use)
1536 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
1537 base + NvRegTransmitPoll);
1540 static void nv_start_rxtx(struct net_device *dev)
1542 nv_start_rx(dev);
1543 nv_start_tx(dev);
1546 static void nv_stop_rxtx(struct net_device *dev)
1548 nv_stop_rx(dev);
1549 nv_stop_tx(dev);
1552 static void nv_txrx_reset(struct net_device *dev)
1554 struct fe_priv *np = netdev_priv(dev);
1555 u8 __iomem *base = get_hwbase(dev);
1557 dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
1558 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1559 pci_push(base);
1560 udelay(NV_TXRX_RESET_DELAY);
1561 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
1562 pci_push(base);
1565 static void nv_mac_reset(struct net_device *dev)
1567 struct fe_priv *np = netdev_priv(dev);
1568 u8 __iomem *base = get_hwbase(dev);
1569 u32 temp1, temp2, temp3;
1571 dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
1573 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1574 pci_push(base);
1576 /* save registers since they will be cleared on reset */
1577 temp1 = readl(base + NvRegMacAddrA);
1578 temp2 = readl(base + NvRegMacAddrB);
1579 temp3 = readl(base + NvRegTransmitPoll);
1581 writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
1582 pci_push(base);
1583 udelay(NV_MAC_RESET_DELAY);
1584 writel(0, base + NvRegMacReset);
1585 pci_push(base);
1586 udelay(NV_MAC_RESET_DELAY);
1588 /* restore saved registers */
1589 writel(temp1, base + NvRegMacAddrA);
1590 writel(temp2, base + NvRegMacAddrB);
1591 writel(temp3, base + NvRegTransmitPoll);
1593 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
1594 pci_push(base);
1597 static void nv_get_hw_stats(struct net_device *dev)
1599 struct fe_priv *np = netdev_priv(dev);
1600 u8 __iomem *base = get_hwbase(dev);
1602 np->estats.tx_bytes += readl(base + NvRegTxCnt);
1603 np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
1604 np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
1605 np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
1606 np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
1607 np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
1608 np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
1609 np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
1610 np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
1611 np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
1612 np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
1613 np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
1614 np->estats.rx_runt += readl(base + NvRegRxRunt);
1615 np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
1616 np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
1617 np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
1618 np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
1619 np->estats.rx_length_error += readl(base + NvRegRxLenErr);
1620 np->estats.rx_unicast += readl(base + NvRegRxUnicast);
1621 np->estats.rx_multicast += readl(base + NvRegRxMulticast);
1622 np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
1623 np->estats.rx_packets =
1624 np->estats.rx_unicast +
1625 np->estats.rx_multicast +
1626 np->estats.rx_broadcast;
1627 np->estats.rx_errors_total =
1628 np->estats.rx_crc_errors +
1629 np->estats.rx_over_errors +
1630 np->estats.rx_frame_error +
1631 (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
1632 np->estats.rx_late_collision +
1633 np->estats.rx_runt +
1634 np->estats.rx_frame_too_long;
1635 np->estats.tx_errors_total =
1636 np->estats.tx_late_collision +
1637 np->estats.tx_fifo_errors +
1638 np->estats.tx_carrier_errors +
1639 np->estats.tx_excess_deferral +
1640 np->estats.tx_retry_error;
1642 if (np->driver_data & DEV_HAS_STATISTICS_V2) {
1643 np->estats.tx_deferral += readl(base + NvRegTxDef);
1644 np->estats.tx_packets += readl(base + NvRegTxFrame);
1645 np->estats.rx_bytes += readl(base + NvRegRxCnt);
1646 np->estats.tx_pause += readl(base + NvRegTxPause);
1647 np->estats.rx_pause += readl(base + NvRegRxPause);
1648 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
1651 if (np->driver_data & DEV_HAS_STATISTICS_V3) {
1652 np->estats.tx_unicast += readl(base + NvRegTxUnicast);
1653 np->estats.tx_multicast += readl(base + NvRegTxMulticast);
1654 np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
1659 * nv_get_stats: dev->get_stats function
1660 * Get latest stats value from the nic.
1661 * Called with read_lock(&dev_base_lock) held for read -
1662 * only synchronized against unregister_netdevice.
1664 static struct net_device_stats *nv_get_stats(struct net_device *dev)
1666 struct fe_priv *np = netdev_priv(dev);
1668 /* If the nic supports hw counters then retrieve latest values */
1669 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) {
1670 nv_get_hw_stats(dev);
1672 /* copy to net_device stats */
1673 dev->stats.tx_bytes = np->estats.tx_bytes;
1674 dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
1675 dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
1676 dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
1677 dev->stats.rx_over_errors = np->estats.rx_over_errors;
1678 dev->stats.rx_errors = np->estats.rx_errors_total;
1679 dev->stats.tx_errors = np->estats.tx_errors_total;
1682 return &dev->stats;
1686 * nv_alloc_rx: fill rx ring entries.
1687 * Return 1 if the allocations for the skbs failed and the
1688 * rx engine is without Available descriptors
1690 static int nv_alloc_rx(struct net_device *dev)
1692 struct fe_priv *np = netdev_priv(dev);
1693 struct ring_desc* less_rx;
1695 less_rx = np->get_rx.orig;
1696 if (less_rx-- == np->first_rx.orig)
1697 less_rx = np->last_rx.orig;
1699 while (np->put_rx.orig != less_rx) {
1700 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
1701 if (skb) {
1702 np->put_rx_ctx->skb = skb;
1703 np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
1704 skb->data,
1705 skb_tailroom(skb),
1706 PCI_DMA_FROMDEVICE);
1707 np->put_rx_ctx->dma_len = skb_tailroom(skb);
1708 np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
1709 wmb();
1710 np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
1711 if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
1712 np->put_rx.orig = np->first_rx.orig;
1713 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1714 np->put_rx_ctx = np->first_rx_ctx;
1715 } else {
1716 return 1;
1719 return 0;
1722 static int nv_alloc_rx_optimized(struct net_device *dev)
1724 struct fe_priv *np = netdev_priv(dev);
1725 struct ring_desc_ex* less_rx;
1727 less_rx = np->get_rx.ex;
1728 if (less_rx-- == np->first_rx.ex)
1729 less_rx = np->last_rx.ex;
1731 while (np->put_rx.ex != less_rx) {
1732 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
1733 if (skb) {
1734 np->put_rx_ctx->skb = skb;
1735 np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
1736 skb->data,
1737 skb_tailroom(skb),
1738 PCI_DMA_FROMDEVICE);
1739 np->put_rx_ctx->dma_len = skb_tailroom(skb);
1740 np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
1741 np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
1742 wmb();
1743 np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
1744 if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
1745 np->put_rx.ex = np->first_rx.ex;
1746 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1747 np->put_rx_ctx = np->first_rx_ctx;
1748 } else {
1749 return 1;
1752 return 0;
1755 /* If rx bufs are exhausted called after 50ms to attempt to refresh */
1756 #ifdef CONFIG_FORCEDETH_NAPI
1757 static void nv_do_rx_refill(unsigned long data)
1759 struct net_device *dev = (struct net_device *) data;
1760 struct fe_priv *np = netdev_priv(dev);
1762 /* Just reschedule NAPI rx processing */
1763 netif_rx_schedule(dev, &np->napi);
1765 #else
1766 static void nv_do_rx_refill(unsigned long data)
1768 struct net_device *dev = (struct net_device *) data;
1769 struct fe_priv *np = netdev_priv(dev);
1770 int retcode;
1772 if (!using_multi_irqs(dev)) {
1773 if (np->msi_flags & NV_MSI_X_ENABLED)
1774 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1775 else
1776 disable_irq(np->pci_dev->irq);
1777 } else {
1778 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1780 if (!nv_optimized(np))
1781 retcode = nv_alloc_rx(dev);
1782 else
1783 retcode = nv_alloc_rx_optimized(dev);
1784 if (retcode) {
1785 spin_lock_irq(&np->lock);
1786 if (!np->in_shutdown)
1787 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
1788 spin_unlock_irq(&np->lock);
1790 if (!using_multi_irqs(dev)) {
1791 if (np->msi_flags & NV_MSI_X_ENABLED)
1792 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1793 else
1794 enable_irq(np->pci_dev->irq);
1795 } else {
1796 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1799 #endif
1801 static void nv_init_rx(struct net_device *dev)
1803 struct fe_priv *np = netdev_priv(dev);
1804 int i;
1806 np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
1808 if (!nv_optimized(np))
1809 np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
1810 else
1811 np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
1812 np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
1813 np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
1815 for (i = 0; i < np->rx_ring_size; i++) {
1816 if (!nv_optimized(np)) {
1817 np->rx_ring.orig[i].flaglen = 0;
1818 np->rx_ring.orig[i].buf = 0;
1819 } else {
1820 np->rx_ring.ex[i].flaglen = 0;
1821 np->rx_ring.ex[i].txvlan = 0;
1822 np->rx_ring.ex[i].bufhigh = 0;
1823 np->rx_ring.ex[i].buflow = 0;
1825 np->rx_skb[i].skb = NULL;
1826 np->rx_skb[i].dma = 0;
1830 static void nv_init_tx(struct net_device *dev)
1832 struct fe_priv *np = netdev_priv(dev);
1833 int i;
1835 np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
1837 if (!nv_optimized(np))
1838 np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
1839 else
1840 np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
1841 np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
1842 np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
1843 np->tx_pkts_in_progress = 0;
1844 np->tx_change_owner = NULL;
1845 np->tx_end_flip = NULL;
1847 for (i = 0; i < np->tx_ring_size; i++) {
1848 if (!nv_optimized(np)) {
1849 np->tx_ring.orig[i].flaglen = 0;
1850 np->tx_ring.orig[i].buf = 0;
1851 } else {
1852 np->tx_ring.ex[i].flaglen = 0;
1853 np->tx_ring.ex[i].txvlan = 0;
1854 np->tx_ring.ex[i].bufhigh = 0;
1855 np->tx_ring.ex[i].buflow = 0;
1857 np->tx_skb[i].skb = NULL;
1858 np->tx_skb[i].dma = 0;
1859 np->tx_skb[i].dma_len = 0;
1860 np->tx_skb[i].first_tx_desc = NULL;
1861 np->tx_skb[i].next_tx_ctx = NULL;
1865 static int nv_init_ring(struct net_device *dev)
1867 struct fe_priv *np = netdev_priv(dev);
1869 nv_init_tx(dev);
1870 nv_init_rx(dev);
1872 if (!nv_optimized(np))
1873 return nv_alloc_rx(dev);
1874 else
1875 return nv_alloc_rx_optimized(dev);
1878 static int nv_release_txskb(struct net_device *dev, struct nv_skb_map* tx_skb)
1880 struct fe_priv *np = netdev_priv(dev);
1882 if (tx_skb->dma) {
1883 pci_unmap_page(np->pci_dev, tx_skb->dma,
1884 tx_skb->dma_len,
1885 PCI_DMA_TODEVICE);
1886 tx_skb->dma = 0;
1888 if (tx_skb->skb) {
1889 dev_kfree_skb_any(tx_skb->skb);
1890 tx_skb->skb = NULL;
1891 return 1;
1892 } else {
1893 return 0;
1897 static void nv_drain_tx(struct net_device *dev)
1899 struct fe_priv *np = netdev_priv(dev);
1900 unsigned int i;
1902 for (i = 0; i < np->tx_ring_size; i++) {
1903 if (!nv_optimized(np)) {
1904 np->tx_ring.orig[i].flaglen = 0;
1905 np->tx_ring.orig[i].buf = 0;
1906 } else {
1907 np->tx_ring.ex[i].flaglen = 0;
1908 np->tx_ring.ex[i].txvlan = 0;
1909 np->tx_ring.ex[i].bufhigh = 0;
1910 np->tx_ring.ex[i].buflow = 0;
1912 if (nv_release_txskb(dev, &np->tx_skb[i]))
1913 dev->stats.tx_dropped++;
1914 np->tx_skb[i].dma = 0;
1915 np->tx_skb[i].dma_len = 0;
1916 np->tx_skb[i].first_tx_desc = NULL;
1917 np->tx_skb[i].next_tx_ctx = NULL;
1919 np->tx_pkts_in_progress = 0;
1920 np->tx_change_owner = NULL;
1921 np->tx_end_flip = NULL;
1924 static void nv_drain_rx(struct net_device *dev)
1926 struct fe_priv *np = netdev_priv(dev);
1927 int i;
1929 for (i = 0; i < np->rx_ring_size; i++) {
1930 if (!nv_optimized(np)) {
1931 np->rx_ring.orig[i].flaglen = 0;
1932 np->rx_ring.orig[i].buf = 0;
1933 } else {
1934 np->rx_ring.ex[i].flaglen = 0;
1935 np->rx_ring.ex[i].txvlan = 0;
1936 np->rx_ring.ex[i].bufhigh = 0;
1937 np->rx_ring.ex[i].buflow = 0;
1939 wmb();
1940 if (np->rx_skb[i].skb) {
1941 pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
1942 (skb_end_pointer(np->rx_skb[i].skb) -
1943 np->rx_skb[i].skb->data),
1944 PCI_DMA_FROMDEVICE);
1945 dev_kfree_skb(np->rx_skb[i].skb);
1946 np->rx_skb[i].skb = NULL;
1951 static void nv_drain_rxtx(struct net_device *dev)
1953 nv_drain_tx(dev);
1954 nv_drain_rx(dev);
1957 static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
1959 return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
1962 static void nv_legacybackoff_reseed(struct net_device *dev)
1964 u8 __iomem *base = get_hwbase(dev);
1965 u32 reg;
1966 u32 low;
1967 int tx_status = 0;
1969 reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK;
1970 get_random_bytes(&low, sizeof(low));
1971 reg |= low & NVREG_SLOTTIME_MASK;
1973 /* Need to stop tx before change takes effect.
1974 * Caller has already gained np->lock.
1976 tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START;
1977 if (tx_status)
1978 nv_stop_tx(dev);
1979 nv_stop_rx(dev);
1980 writel(reg, base + NvRegSlotTime);
1981 if (tx_status)
1982 nv_start_tx(dev);
1983 nv_start_rx(dev);
1986 /* Gear Backoff Seeds */
1987 #define BACKOFF_SEEDSET_ROWS 8
1988 #define BACKOFF_SEEDSET_LFSRS 15
1990 /* Known Good seed sets */
1991 static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
1992 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
1993 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
1994 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
1995 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
1996 {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
1997 {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
1998 {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84},
1999 {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184}};
2001 static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
2002 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
2003 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2004 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
2005 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
2006 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
2007 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2008 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2009 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}};
2011 static void nv_gear_backoff_reseed(struct net_device *dev)
2013 u8 __iomem *base = get_hwbase(dev);
2014 u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed;
2015 u32 temp, seedset, combinedSeed;
2016 int i;
2018 /* Setup seed for free running LFSR */
2019 /* We are going to read the time stamp counter 3 times
2020 and swizzle bits around to increase randomness */
2021 get_random_bytes(&miniseed1, sizeof(miniseed1));
2022 miniseed1 &= 0x0fff;
2023 if (miniseed1 == 0)
2024 miniseed1 = 0xabc;
2026 get_random_bytes(&miniseed2, sizeof(miniseed2));
2027 miniseed2 &= 0x0fff;
2028 if (miniseed2 == 0)
2029 miniseed2 = 0xabc;
2030 miniseed2_reversed =
2031 ((miniseed2 & 0xF00) >> 8) |
2032 (miniseed2 & 0x0F0) |
2033 ((miniseed2 & 0x00F) << 8);
2035 get_random_bytes(&miniseed3, sizeof(miniseed3));
2036 miniseed3 &= 0x0fff;
2037 if (miniseed3 == 0)
2038 miniseed3 = 0xabc;
2039 miniseed3_reversed =
2040 ((miniseed3 & 0xF00) >> 8) |
2041 (miniseed3 & 0x0F0) |
2042 ((miniseed3 & 0x00F) << 8);
2044 combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) |
2045 (miniseed2 ^ miniseed3_reversed);
2047 /* Seeds can not be zero */
2048 if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0)
2049 combinedSeed |= 0x08;
2050 if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0)
2051 combinedSeed |= 0x8000;
2053 /* No need to disable tx here */
2054 temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
2055 temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
2056 temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
2057 writel(temp,base + NvRegBackOffControl);
2059 /* Setup seeds for all gear LFSRs. */
2060 get_random_bytes(&seedset, sizeof(seedset));
2061 seedset = seedset % BACKOFF_SEEDSET_ROWS;
2062 for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++)
2064 temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
2065 temp |= main_seedset[seedset][i-1] & 0x3ff;
2066 temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
2067 writel(temp, base + NvRegBackOffControl);
2072 * nv_start_xmit: dev->hard_start_xmit function
2073 * Called with netif_tx_lock held.
2075 static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2077 struct fe_priv *np = netdev_priv(dev);
2078 u32 tx_flags = 0;
2079 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
2080 unsigned int fragments = skb_shinfo(skb)->nr_frags;
2081 unsigned int i;
2082 u32 offset = 0;
2083 u32 bcnt;
2084 u32 size = skb->len-skb->data_len;
2085 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2086 u32 empty_slots;
2087 struct ring_desc* put_tx;
2088 struct ring_desc* start_tx;
2089 struct ring_desc* prev_tx;
2090 struct nv_skb_map* prev_tx_ctx;
2091 unsigned long flags;
2093 /* add fragments to entries count */
2094 for (i = 0; i < fragments; i++) {
2095 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
2096 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2099 empty_slots = nv_get_empty_tx_slots(np);
2100 if (unlikely(empty_slots <= entries)) {
2101 spin_lock_irqsave(&np->lock, flags);
2102 netif_stop_queue(dev);
2103 np->tx_stop = 1;
2104 spin_unlock_irqrestore(&np->lock, flags);
2105 return NETDEV_TX_BUSY;
2108 start_tx = put_tx = np->put_tx.orig;
2110 /* setup the header buffer */
2111 do {
2112 prev_tx = put_tx;
2113 prev_tx_ctx = np->put_tx_ctx;
2114 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2115 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
2116 PCI_DMA_TODEVICE);
2117 np->put_tx_ctx->dma_len = bcnt;
2118 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
2119 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2121 tx_flags = np->tx_flags;
2122 offset += bcnt;
2123 size -= bcnt;
2124 if (unlikely(put_tx++ == np->last_tx.orig))
2125 put_tx = np->first_tx.orig;
2126 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2127 np->put_tx_ctx = np->first_tx_ctx;
2128 } while (size);
2130 /* setup the fragments */
2131 for (i = 0; i < fragments; i++) {
2132 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2133 u32 size = frag->size;
2134 offset = 0;
2136 do {
2137 prev_tx = put_tx;
2138 prev_tx_ctx = np->put_tx_ctx;
2139 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2140 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
2141 PCI_DMA_TODEVICE);
2142 np->put_tx_ctx->dma_len = bcnt;
2143 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
2144 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2146 offset += bcnt;
2147 size -= bcnt;
2148 if (unlikely(put_tx++ == np->last_tx.orig))
2149 put_tx = np->first_tx.orig;
2150 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2151 np->put_tx_ctx = np->first_tx_ctx;
2152 } while (size);
2155 /* set last fragment flag */
2156 prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
2158 /* save skb in this slot's context area */
2159 prev_tx_ctx->skb = skb;
2161 if (skb_is_gso(skb))
2162 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
2163 else
2164 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
2165 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
2167 spin_lock_irqsave(&np->lock, flags);
2169 /* set tx flags */
2170 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
2171 np->put_tx.orig = put_tx;
2173 spin_unlock_irqrestore(&np->lock, flags);
2175 dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
2176 dev->name, entries, tx_flags_extra);
2178 int j;
2179 for (j=0; j<64; j++) {
2180 if ((j%16) == 0)
2181 dprintk("\n%03x:", j);
2182 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2184 dprintk("\n");
2187 dev->trans_start = jiffies;
2188 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2189 return NETDEV_TX_OK;
2192 static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
2194 struct fe_priv *np = netdev_priv(dev);
2195 u32 tx_flags = 0;
2196 u32 tx_flags_extra;
2197 unsigned int fragments = skb_shinfo(skb)->nr_frags;
2198 unsigned int i;
2199 u32 offset = 0;
2200 u32 bcnt;
2201 u32 size = skb->len-skb->data_len;
2202 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2203 u32 empty_slots;
2204 struct ring_desc_ex* put_tx;
2205 struct ring_desc_ex* start_tx;
2206 struct ring_desc_ex* prev_tx;
2207 struct nv_skb_map* prev_tx_ctx;
2208 struct nv_skb_map* start_tx_ctx;
2209 unsigned long flags;
2211 /* add fragments to entries count */
2212 for (i = 0; i < fragments; i++) {
2213 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
2214 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2217 empty_slots = nv_get_empty_tx_slots(np);
2218 if (unlikely(empty_slots <= entries)) {
2219 spin_lock_irqsave(&np->lock, flags);
2220 netif_stop_queue(dev);
2221 np->tx_stop = 1;
2222 spin_unlock_irqrestore(&np->lock, flags);
2223 return NETDEV_TX_BUSY;
2226 start_tx = put_tx = np->put_tx.ex;
2227 start_tx_ctx = np->put_tx_ctx;
2229 /* setup the header buffer */
2230 do {
2231 prev_tx = put_tx;
2232 prev_tx_ctx = np->put_tx_ctx;
2233 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2234 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
2235 PCI_DMA_TODEVICE);
2236 np->put_tx_ctx->dma_len = bcnt;
2237 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
2238 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
2239 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2241 tx_flags = NV_TX2_VALID;
2242 offset += bcnt;
2243 size -= bcnt;
2244 if (unlikely(put_tx++ == np->last_tx.ex))
2245 put_tx = np->first_tx.ex;
2246 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2247 np->put_tx_ctx = np->first_tx_ctx;
2248 } while (size);
2250 /* setup the fragments */
2251 for (i = 0; i < fragments; i++) {
2252 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2253 u32 size = frag->size;
2254 offset = 0;
2256 do {
2257 prev_tx = put_tx;
2258 prev_tx_ctx = np->put_tx_ctx;
2259 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2260 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
2261 PCI_DMA_TODEVICE);
2262 np->put_tx_ctx->dma_len = bcnt;
2263 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
2264 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
2265 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2267 offset += bcnt;
2268 size -= bcnt;
2269 if (unlikely(put_tx++ == np->last_tx.ex))
2270 put_tx = np->first_tx.ex;
2271 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2272 np->put_tx_ctx = np->first_tx_ctx;
2273 } while (size);
2276 /* set last fragment flag */
2277 prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);
2279 /* save skb in this slot's context area */
2280 prev_tx_ctx->skb = skb;
2282 if (skb_is_gso(skb))
2283 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
2284 else
2285 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
2286 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
2288 /* vlan tag */
2289 if (likely(!np->vlangrp)) {
2290 start_tx->txvlan = 0;
2291 } else {
2292 if (vlan_tx_tag_present(skb))
2293 start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb));
2294 else
2295 start_tx->txvlan = 0;
2298 spin_lock_irqsave(&np->lock, flags);
2300 if (np->tx_limit) {
2301 /* Limit the number of outstanding tx. Setup all fragments, but
2302 * do not set the VALID bit on the first descriptor. Save a pointer
2303 * to that descriptor and also for next skb_map element.
2306 if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) {
2307 if (!np->tx_change_owner)
2308 np->tx_change_owner = start_tx_ctx;
2310 /* remove VALID bit */
2311 tx_flags &= ~NV_TX2_VALID;
2312 start_tx_ctx->first_tx_desc = start_tx;
2313 start_tx_ctx->next_tx_ctx = np->put_tx_ctx;
2314 np->tx_end_flip = np->put_tx_ctx;
2315 } else {
2316 np->tx_pkts_in_progress++;
2320 /* set tx flags */
2321 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
2322 np->put_tx.ex = put_tx;
2324 spin_unlock_irqrestore(&np->lock, flags);
2326 dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
2327 dev->name, entries, tx_flags_extra);
2329 int j;
2330 for (j=0; j<64; j++) {
2331 if ((j%16) == 0)
2332 dprintk("\n%03x:", j);
2333 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2335 dprintk("\n");
2338 dev->trans_start = jiffies;
2339 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2340 return NETDEV_TX_OK;
2343 static inline void nv_tx_flip_ownership(struct net_device *dev)
2345 struct fe_priv *np = netdev_priv(dev);
2347 np->tx_pkts_in_progress--;
2348 if (np->tx_change_owner) {
2349 np->tx_change_owner->first_tx_desc->flaglen |=
2350 cpu_to_le32(NV_TX2_VALID);
2351 np->tx_pkts_in_progress++;
2353 np->tx_change_owner = np->tx_change_owner->next_tx_ctx;
2354 if (np->tx_change_owner == np->tx_end_flip)
2355 np->tx_change_owner = NULL;
2357 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2362 * nv_tx_done: check for completed packets, release the skbs.
2364 * Caller must own np->lock.
2366 static void nv_tx_done(struct net_device *dev)
2368 struct fe_priv *np = netdev_priv(dev);
2369 u32 flags;
2370 struct ring_desc* orig_get_tx = np->get_tx.orig;
2372 while ((np->get_tx.orig != np->put_tx.orig) &&
2373 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID)) {
2375 dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
2376 dev->name, flags);
2378 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
2379 np->get_tx_ctx->dma_len,
2380 PCI_DMA_TODEVICE);
2381 np->get_tx_ctx->dma = 0;
2383 if (np->desc_ver == DESC_VER_1) {
2384 if (flags & NV_TX_LASTPACKET) {
2385 if (flags & NV_TX_ERROR) {
2386 if (flags & NV_TX_UNDERFLOW)
2387 dev->stats.tx_fifo_errors++;
2388 if (flags & NV_TX_CARRIERLOST)
2389 dev->stats.tx_carrier_errors++;
2390 if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
2391 nv_legacybackoff_reseed(dev);
2392 dev->stats.tx_errors++;
2393 } else {
2394 dev->stats.tx_packets++;
2395 dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
2397 dev_kfree_skb_any(np->get_tx_ctx->skb);
2398 np->get_tx_ctx->skb = NULL;
2400 } else {
2401 if (flags & NV_TX2_LASTPACKET) {
2402 if (flags & NV_TX2_ERROR) {
2403 if (flags & NV_TX2_UNDERFLOW)
2404 dev->stats.tx_fifo_errors++;
2405 if (flags & NV_TX2_CARRIERLOST)
2406 dev->stats.tx_carrier_errors++;
2407 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
2408 nv_legacybackoff_reseed(dev);
2409 dev->stats.tx_errors++;
2410 } else {
2411 dev->stats.tx_packets++;
2412 dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
2414 dev_kfree_skb_any(np->get_tx_ctx->skb);
2415 np->get_tx_ctx->skb = NULL;
2418 if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
2419 np->get_tx.orig = np->first_tx.orig;
2420 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2421 np->get_tx_ctx = np->first_tx_ctx;
2423 if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
2424 np->tx_stop = 0;
2425 netif_wake_queue(dev);
2429 static void nv_tx_done_optimized(struct net_device *dev, int limit)
2431 struct fe_priv *np = netdev_priv(dev);
2432 u32 flags;
2433 struct ring_desc_ex* orig_get_tx = np->get_tx.ex;
2435 while ((np->get_tx.ex != np->put_tx.ex) &&
2436 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) &&
2437 (limit-- > 0)) {
2439 dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
2440 dev->name, flags);
2442 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
2443 np->get_tx_ctx->dma_len,
2444 PCI_DMA_TODEVICE);
2445 np->get_tx_ctx->dma = 0;
2447 if (flags & NV_TX2_LASTPACKET) {
2448 if (!(flags & NV_TX2_ERROR))
2449 dev->stats.tx_packets++;
2450 else {
2451 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
2452 if (np->driver_data & DEV_HAS_GEAR_MODE)
2453 nv_gear_backoff_reseed(dev);
2454 else
2455 nv_legacybackoff_reseed(dev);
2459 dev_kfree_skb_any(np->get_tx_ctx->skb);
2460 np->get_tx_ctx->skb = NULL;
2462 if (np->tx_limit) {
2463 nv_tx_flip_ownership(dev);
2466 if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
2467 np->get_tx.ex = np->first_tx.ex;
2468 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2469 np->get_tx_ctx = np->first_tx_ctx;
2471 if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
2472 np->tx_stop = 0;
2473 netif_wake_queue(dev);
2478 * nv_tx_timeout: dev->tx_timeout function
2479 * Called with netif_tx_lock held.
2481 static void nv_tx_timeout(struct net_device *dev)
2483 struct fe_priv *np = netdev_priv(dev);
2484 u8 __iomem *base = get_hwbase(dev);
2485 u32 status;
2487 if (np->msi_flags & NV_MSI_X_ENABLED)
2488 status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2489 else
2490 status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
2492 printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);
2495 int i;
2497 printk(KERN_INFO "%s: Ring at %lx\n",
2498 dev->name, (unsigned long)np->ring_addr);
2499 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
2500 for (i=0;i<=np->register_size;i+= 32) {
2501 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
2503 readl(base + i + 0), readl(base + i + 4),
2504 readl(base + i + 8), readl(base + i + 12),
2505 readl(base + i + 16), readl(base + i + 20),
2506 readl(base + i + 24), readl(base + i + 28));
2508 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
2509 for (i=0;i<np->tx_ring_size;i+= 4) {
2510 if (!nv_optimized(np)) {
2511 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
2513 le32_to_cpu(np->tx_ring.orig[i].buf),
2514 le32_to_cpu(np->tx_ring.orig[i].flaglen),
2515 le32_to_cpu(np->tx_ring.orig[i+1].buf),
2516 le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
2517 le32_to_cpu(np->tx_ring.orig[i+2].buf),
2518 le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
2519 le32_to_cpu(np->tx_ring.orig[i+3].buf),
2520 le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
2521 } else {
2522 printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
2524 le32_to_cpu(np->tx_ring.ex[i].bufhigh),
2525 le32_to_cpu(np->tx_ring.ex[i].buflow),
2526 le32_to_cpu(np->tx_ring.ex[i].flaglen),
2527 le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
2528 le32_to_cpu(np->tx_ring.ex[i+1].buflow),
2529 le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
2530 le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
2531 le32_to_cpu(np->tx_ring.ex[i+2].buflow),
2532 le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
2533 le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
2534 le32_to_cpu(np->tx_ring.ex[i+3].buflow),
2535 le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
2540 spin_lock_irq(&np->lock);
2542 /* 1) stop tx engine */
2543 nv_stop_tx(dev);
2545 /* 2) check that the packets were not sent already: */
2546 if (!nv_optimized(np))
2547 nv_tx_done(dev);
2548 else
2549 nv_tx_done_optimized(dev, np->tx_ring_size);
2551 /* 3) if there are dead entries: clear everything */
2552 if (np->get_tx_ctx != np->put_tx_ctx) {
2553 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
2554 nv_drain_tx(dev);
2555 nv_init_tx(dev);
2556 setup_hw_rings(dev, NV_SETUP_TX_RING);
2559 netif_wake_queue(dev);
2561 /* 4) restart tx engine */
2562 nv_start_tx(dev);
2563 spin_unlock_irq(&np->lock);
2567 * Called when the nic notices a mismatch between the actual data len on the
2568 * wire and the len indicated in the 802 header
2570 static int nv_getlen(struct net_device *dev, void *packet, int datalen)
2572 int hdrlen; /* length of the 802 header */
2573 int protolen; /* length as stored in the proto field */
2575 /* 1) calculate len according to header */
2576 if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
2577 protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
2578 hdrlen = VLAN_HLEN;
2579 } else {
2580 protolen = ntohs( ((struct ethhdr *)packet)->h_proto);
2581 hdrlen = ETH_HLEN;
2583 dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
2584 dev->name, datalen, protolen, hdrlen);
2585 if (protolen > ETH_DATA_LEN)
2586 return datalen; /* Value in proto field not a len, no checks possible */
2588 protolen += hdrlen;
2589 /* consistency checks: */
2590 if (datalen > ETH_ZLEN) {
2591 if (datalen >= protolen) {
2592 /* more data on wire than in 802 header, trim of
2593 * additional data.
2595 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
2596 dev->name, protolen);
2597 return protolen;
2598 } else {
2599 /* less data on wire than mentioned in header.
2600 * Discard the packet.
2602 dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
2603 dev->name);
2604 return -1;
2606 } else {
2607 /* short packet. Accept only if 802 values are also short */
2608 if (protolen > ETH_ZLEN) {
2609 dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
2610 dev->name);
2611 return -1;
2613 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
2614 dev->name, datalen);
2615 return datalen;
2619 static int nv_rx_process(struct net_device *dev, int limit)
2621 struct fe_priv *np = netdev_priv(dev);
2622 u32 flags;
2623 int rx_work = 0;
2624 struct sk_buff *skb;
2625 int len;
2627 while((np->get_rx.orig != np->put_rx.orig) &&
2628 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
2629 (rx_work < limit)) {
2631 dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
2632 dev->name, flags);
2635 * the packet is for us - immediately tear down the pci mapping.
2636 * TODO: check if a prefetch of the first cacheline improves
2637 * the performance.
2639 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2640 np->get_rx_ctx->dma_len,
2641 PCI_DMA_FROMDEVICE);
2642 skb = np->get_rx_ctx->skb;
2643 np->get_rx_ctx->skb = NULL;
2646 int j;
2647 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
2648 for (j=0; j<64; j++) {
2649 if ((j%16) == 0)
2650 dprintk("\n%03x:", j);
2651 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2653 dprintk("\n");
2655 /* look at what we actually got: */
2656 if (np->desc_ver == DESC_VER_1) {
2657 if (likely(flags & NV_RX_DESCRIPTORVALID)) {
2658 len = flags & LEN_MASK_V1;
2659 if (unlikely(flags & NV_RX_ERROR)) {
2660 if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
2661 len = nv_getlen(dev, skb->data, len);
2662 if (len < 0) {
2663 dev->stats.rx_errors++;
2664 dev_kfree_skb(skb);
2665 goto next_pkt;
2668 /* framing errors are soft errors */
2669 else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
2670 if (flags & NV_RX_SUBSTRACT1) {
2671 len--;
2674 /* the rest are hard errors */
2675 else {
2676 if (flags & NV_RX_MISSEDFRAME)
2677 dev->stats.rx_missed_errors++;
2678 if (flags & NV_RX_CRCERR)
2679 dev->stats.rx_crc_errors++;
2680 if (flags & NV_RX_OVERFLOW)
2681 dev->stats.rx_over_errors++;
2682 dev->stats.rx_errors++;
2683 dev_kfree_skb(skb);
2684 goto next_pkt;
2687 } else {
2688 dev_kfree_skb(skb);
2689 goto next_pkt;
2691 } else {
2692 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2693 len = flags & LEN_MASK_V2;
2694 if (unlikely(flags & NV_RX2_ERROR)) {
2695 if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
2696 len = nv_getlen(dev, skb->data, len);
2697 if (len < 0) {
2698 dev->stats.rx_errors++;
2699 dev_kfree_skb(skb);
2700 goto next_pkt;
2703 /* framing errors are soft errors */
2704 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2705 if (flags & NV_RX2_SUBSTRACT1) {
2706 len--;
2709 /* the rest are hard errors */
2710 else {
2711 if (flags & NV_RX2_CRCERR)
2712 dev->stats.rx_crc_errors++;
2713 if (flags & NV_RX2_OVERFLOW)
2714 dev->stats.rx_over_errors++;
2715 dev->stats.rx_errors++;
2716 dev_kfree_skb(skb);
2717 goto next_pkt;
2720 if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
2721 ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */
2722 skb->ip_summed = CHECKSUM_UNNECESSARY;
2723 } else {
2724 dev_kfree_skb(skb);
2725 goto next_pkt;
2728 /* got a valid packet - forward it to the network core */
2729 skb_put(skb, len);
2730 skb->protocol = eth_type_trans(skb, dev);
2731 dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
2732 dev->name, len, skb->protocol);
2733 #ifdef CONFIG_FORCEDETH_NAPI
2734 netif_receive_skb(skb);
2735 #else
2736 netif_rx(skb);
2737 #endif
2738 dev->last_rx = jiffies;
2739 dev->stats.rx_packets++;
2740 dev->stats.rx_bytes += len;
2741 next_pkt:
2742 if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
2743 np->get_rx.orig = np->first_rx.orig;
2744 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2745 np->get_rx_ctx = np->first_rx_ctx;
2747 rx_work++;
2750 return rx_work;
2753 static int nv_rx_process_optimized(struct net_device *dev, int limit)
2755 struct fe_priv *np = netdev_priv(dev);
2756 u32 flags;
2757 u32 vlanflags = 0;
2758 int rx_work = 0;
2759 struct sk_buff *skb;
2760 int len;
2762 while((np->get_rx.ex != np->put_rx.ex) &&
2763 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
2764 (rx_work < limit)) {
2766 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
2767 dev->name, flags);
2770 * the packet is for us - immediately tear down the pci mapping.
2771 * TODO: check if a prefetch of the first cacheline improves
2772 * the performance.
2774 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2775 np->get_rx_ctx->dma_len,
2776 PCI_DMA_FROMDEVICE);
2777 skb = np->get_rx_ctx->skb;
2778 np->get_rx_ctx->skb = NULL;
2781 int j;
2782 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
2783 for (j=0; j<64; j++) {
2784 if ((j%16) == 0)
2785 dprintk("\n%03x:", j);
2786 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2788 dprintk("\n");
2790 /* look at what we actually got: */
2791 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2792 len = flags & LEN_MASK_V2;
2793 if (unlikely(flags & NV_RX2_ERROR)) {
2794 if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
2795 len = nv_getlen(dev, skb->data, len);
2796 if (len < 0) {
2797 dev_kfree_skb(skb);
2798 goto next_pkt;
2801 /* framing errors are soft errors */
2802 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2803 if (flags & NV_RX2_SUBSTRACT1) {
2804 len--;
2807 /* the rest are hard errors */
2808 else {
2809 dev_kfree_skb(skb);
2810 goto next_pkt;
2814 if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
2815 ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */
2816 skb->ip_summed = CHECKSUM_UNNECESSARY;
2818 /* got a valid packet - forward it to the network core */
2819 skb_put(skb, len);
2820 skb->protocol = eth_type_trans(skb, dev);
2821 prefetch(skb->data);
2823 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n",
2824 dev->name, len, skb->protocol);
2826 if (likely(!np->vlangrp)) {
2827 #ifdef CONFIG_FORCEDETH_NAPI
2828 netif_receive_skb(skb);
2829 #else
2830 netif_rx(skb);
2831 #endif
2832 } else {
2833 vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
2834 if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
2835 #ifdef CONFIG_FORCEDETH_NAPI
2836 vlan_hwaccel_receive_skb(skb, np->vlangrp,
2837 vlanflags & NV_RX3_VLAN_TAG_MASK);
2838 #else
2839 vlan_hwaccel_rx(skb, np->vlangrp,
2840 vlanflags & NV_RX3_VLAN_TAG_MASK);
2841 #endif
2842 } else {
2843 #ifdef CONFIG_FORCEDETH_NAPI
2844 netif_receive_skb(skb);
2845 #else
2846 netif_rx(skb);
2847 #endif
2851 dev->last_rx = jiffies;
2852 dev->stats.rx_packets++;
2853 dev->stats.rx_bytes += len;
2854 } else {
2855 dev_kfree_skb(skb);
2857 next_pkt:
2858 if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
2859 np->get_rx.ex = np->first_rx.ex;
2860 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2861 np->get_rx_ctx = np->first_rx_ctx;
2863 rx_work++;
2866 return rx_work;
2869 static void set_bufsize(struct net_device *dev)
2871 struct fe_priv *np = netdev_priv(dev);
2873 if (dev->mtu <= ETH_DATA_LEN)
2874 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
2875 else
2876 np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
2880 * nv_change_mtu: dev->change_mtu function
2881 * Called with dev_base_lock held for read.
2883 static int nv_change_mtu(struct net_device *dev, int new_mtu)
2885 struct fe_priv *np = netdev_priv(dev);
2886 int old_mtu;
2888 if (new_mtu < 64 || new_mtu > np->pkt_limit)
2889 return -EINVAL;
2891 old_mtu = dev->mtu;
2892 dev->mtu = new_mtu;
2894 /* return early if the buffer sizes will not change */
2895 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
2896 return 0;
2897 if (old_mtu == new_mtu)
2898 return 0;
2900 /* synchronized against open : rtnl_lock() held by caller */
2901 if (netif_running(dev)) {
2902 u8 __iomem *base = get_hwbase(dev);
2904 * It seems that the nic preloads valid ring entries into an
2905 * internal buffer. The procedure for flushing everything is
2906 * guessed, there is probably a simpler approach.
2907 * Changing the MTU is a rare event, it shouldn't matter.
2909 nv_disable_irq(dev);
2910 netif_tx_lock_bh(dev);
2911 netif_addr_lock(dev);
2912 spin_lock(&np->lock);
2913 /* stop engines */
2914 nv_stop_rxtx(dev);
2915 nv_txrx_reset(dev);
2916 /* drain rx queue */
2917 nv_drain_rxtx(dev);
2918 /* reinit driver view of the rx queue */
2919 set_bufsize(dev);
2920 if (nv_init_ring(dev)) {
2921 if (!np->in_shutdown)
2922 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2924 /* reinit nic view of the rx queue */
2925 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
2926 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
2927 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
2928 base + NvRegRingSizes);
2929 pci_push(base);
2930 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2931 pci_push(base);
2933 /* restart rx engine */
2934 nv_start_rxtx(dev);
2935 spin_unlock(&np->lock);
2936 netif_addr_unlock(dev);
2937 netif_tx_unlock_bh(dev);
2938 nv_enable_irq(dev);
2940 return 0;
2943 static void nv_copy_mac_to_hw(struct net_device *dev)
2945 u8 __iomem *base = get_hwbase(dev);
2946 u32 mac[2];
2948 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
2949 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
2950 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
2952 writel(mac[0], base + NvRegMacAddrA);
2953 writel(mac[1], base + NvRegMacAddrB);
2957 * nv_set_mac_address: dev->set_mac_address function
2958 * Called with rtnl_lock() held.
2960 static int nv_set_mac_address(struct net_device *dev, void *addr)
2962 struct fe_priv *np = netdev_priv(dev);
2963 struct sockaddr *macaddr = (struct sockaddr*)addr;
2965 if (!is_valid_ether_addr(macaddr->sa_data))
2966 return -EADDRNOTAVAIL;
2968 /* synchronized against open : rtnl_lock() held by caller */
2969 memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
2971 if (netif_running(dev)) {
2972 netif_tx_lock_bh(dev);
2973 netif_addr_lock(dev);
2974 spin_lock_irq(&np->lock);
2976 /* stop rx engine */
2977 nv_stop_rx(dev);
2979 /* set mac address */
2980 nv_copy_mac_to_hw(dev);
2982 /* restart rx engine */
2983 nv_start_rx(dev);
2984 spin_unlock_irq(&np->lock);
2985 netif_addr_unlock(dev);
2986 netif_tx_unlock_bh(dev);
2987 } else {
2988 nv_copy_mac_to_hw(dev);
2990 return 0;
2994 * nv_set_multicast: dev->set_multicast function
2995 * Called with netif_tx_lock held.
2997 static void nv_set_multicast(struct net_device *dev)
2999 struct fe_priv *np = netdev_priv(dev);
3000 u8 __iomem *base = get_hwbase(dev);
3001 u32 addr[2];
3002 u32 mask[2];
3003 u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;
3005 memset(addr, 0, sizeof(addr));
3006 memset(mask, 0, sizeof(mask));
3008 if (dev->flags & IFF_PROMISC) {
3009 pff |= NVREG_PFF_PROMISC;
3010 } else {
3011 pff |= NVREG_PFF_MYADDR;
3013 if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
3014 u32 alwaysOff[2];
3015 u32 alwaysOn[2];
3017 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
3018 if (dev->flags & IFF_ALLMULTI) {
3019 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
3020 } else {
3021 struct dev_mc_list *walk;
3023 walk = dev->mc_list;
3024 while (walk != NULL) {
3025 u32 a, b;
3026 a = le32_to_cpu(*(__le32 *) walk->dmi_addr);
3027 b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4]));
3028 alwaysOn[0] &= a;
3029 alwaysOff[0] &= ~a;
3030 alwaysOn[1] &= b;
3031 alwaysOff[1] &= ~b;
3032 walk = walk->next;
3035 addr[0] = alwaysOn[0];
3036 addr[1] = alwaysOn[1];
3037 mask[0] = alwaysOn[0] | alwaysOff[0];
3038 mask[1] = alwaysOn[1] | alwaysOff[1];
3039 } else {
3040 mask[0] = NVREG_MCASTMASKA_NONE;
3041 mask[1] = NVREG_MCASTMASKB_NONE;
3044 addr[0] |= NVREG_MCASTADDRA_FORCE;
3045 pff |= NVREG_PFF_ALWAYS;
3046 spin_lock_irq(&np->lock);
3047 nv_stop_rx(dev);
3048 writel(addr[0], base + NvRegMulticastAddrA);
3049 writel(addr[1], base + NvRegMulticastAddrB);
3050 writel(mask[0], base + NvRegMulticastMaskA);
3051 writel(mask[1], base + NvRegMulticastMaskB);
3052 writel(pff, base + NvRegPacketFilterFlags);
3053 dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
3054 dev->name);
3055 nv_start_rx(dev);
3056 spin_unlock_irq(&np->lock);
3059 static void nv_update_pause(struct net_device *dev, u32 pause_flags)
3061 struct fe_priv *np = netdev_priv(dev);
3062 u8 __iomem *base = get_hwbase(dev);
3064 np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
3066 if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
3067 u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
3068 if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
3069 writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
3070 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3071 } else {
3072 writel(pff, base + NvRegPacketFilterFlags);
3075 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
3076 u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
3077 if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
3078 u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
3079 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
3080 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
3081 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) {
3082 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
3083 /* limit the number of tx pause frames to a default of 8 */
3084 writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit);
3086 writel(pause_enable, base + NvRegTxPauseFrame);
3087 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
3088 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3089 } else {
3090 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
3091 writel(regmisc, base + NvRegMisc1);
3097 * nv_update_linkspeed: Setup the MAC according to the link partner
3098 * @dev: Network device to be configured
3100 * The function queries the PHY and checks if there is a link partner.
3101 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
3102 * set to 10 MBit HD.
3104 * The function returns 0 if there is no link partner and 1 if there is
3105 * a good link partner.
3107 static int nv_update_linkspeed(struct net_device *dev)
3109 struct fe_priv *np = netdev_priv(dev);
3110 u8 __iomem *base = get_hwbase(dev);
3111 int adv = 0;
3112 int lpa = 0;
3113 int adv_lpa, adv_pause, lpa_pause;
3114 int newls = np->linkspeed;
3115 int newdup = np->duplex;
3116 int mii_status;
3117 int retval = 0;
3118 u32 control_1000, status_1000, phyreg, pause_flags, txreg;
3119 u32 txrxFlags = 0;
3120 u32 phy_exp;
3122 /* BMSR_LSTATUS is latched, read it twice:
3123 * we want the current value.
3125 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3126 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3128 if (!(mii_status & BMSR_LSTATUS)) {
3129 dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
3130 dev->name);
3131 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3132 newdup = 0;
3133 retval = 0;
3134 goto set_speed;
3137 if (np->autoneg == 0) {
3138 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
3139 dev->name, np->fixed_mode);
3140 if (np->fixed_mode & LPA_100FULL) {
3141 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3142 newdup = 1;
3143 } else if (np->fixed_mode & LPA_100HALF) {
3144 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3145 newdup = 0;
3146 } else if (np->fixed_mode & LPA_10FULL) {
3147 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3148 newdup = 1;
3149 } else {
3150 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3151 newdup = 0;
3153 retval = 1;
3154 goto set_speed;
3156 /* check auto negotiation is complete */
3157 if (!(mii_status & BMSR_ANEGCOMPLETE)) {
3158 /* still in autonegotiation - configure nic for 10 MBit HD and wait. */
3159 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3160 newdup = 0;
3161 retval = 0;
3162 dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
3163 goto set_speed;
3166 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3167 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
3168 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
3169 dev->name, adv, lpa);
3171 retval = 1;
3172 if (np->gigabit == PHY_GIGABIT) {
3173 control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
3174 status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
3176 if ((control_1000 & ADVERTISE_1000FULL) &&
3177 (status_1000 & LPA_1000FULL)) {
3178 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
3179 dev->name);
3180 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
3181 newdup = 1;
3182 goto set_speed;
3186 /* FIXME: handle parallel detection properly */
3187 adv_lpa = lpa & adv;
3188 if (adv_lpa & LPA_100FULL) {
3189 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3190 newdup = 1;
3191 } else if (adv_lpa & LPA_100HALF) {
3192 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3193 newdup = 0;
3194 } else if (adv_lpa & LPA_10FULL) {
3195 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3196 newdup = 1;
3197 } else if (adv_lpa & LPA_10HALF) {
3198 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3199 newdup = 0;
3200 } else {
3201 dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
3202 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3203 newdup = 0;
3206 set_speed:
3207 if (np->duplex == newdup && np->linkspeed == newls)
3208 return retval;
3210 dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
3211 dev->name, np->linkspeed, np->duplex, newls, newdup);
3213 np->duplex = newdup;
3214 np->linkspeed = newls;
3216 /* The transmitter and receiver must be restarted for safe update */
3217 if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
3218 txrxFlags |= NV_RESTART_TX;
3219 nv_stop_tx(dev);
3221 if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
3222 txrxFlags |= NV_RESTART_RX;
3223 nv_stop_rx(dev);
3226 if (np->gigabit == PHY_GIGABIT) {
3227 phyreg = readl(base + NvRegSlotTime);
3228 phyreg &= ~(0x3FF00);
3229 if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) ||
3230 ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100))
3231 phyreg |= NVREG_SLOTTIME_10_100_FULL;
3232 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
3233 phyreg |= NVREG_SLOTTIME_1000_FULL;
3234 writel(phyreg, base + NvRegSlotTime);
3237 phyreg = readl(base + NvRegPhyInterface);
3238 phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
3239 if (np->duplex == 0)
3240 phyreg |= PHY_HALF;
3241 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
3242 phyreg |= PHY_100;
3243 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3244 phyreg |= PHY_1000;
3245 writel(phyreg, base + NvRegPhyInterface);
3247 phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
3248 if (phyreg & PHY_RGMII) {
3249 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) {
3250 txreg = NVREG_TX_DEFERRAL_RGMII_1000;
3251 } else {
3252 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) {
3253 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10)
3254 txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10;
3255 else
3256 txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100;
3257 } else {
3258 txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
3261 } else {
3262 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX))
3263 txreg = NVREG_TX_DEFERRAL_MII_STRETCH;
3264 else
3265 txreg = NVREG_TX_DEFERRAL_DEFAULT;
3267 writel(txreg, base + NvRegTxDeferral);
3269 if (np->desc_ver == DESC_VER_1) {
3270 txreg = NVREG_TX_WM_DESC1_DEFAULT;
3271 } else {
3272 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3273 txreg = NVREG_TX_WM_DESC2_3_1000;
3274 else
3275 txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
3277 writel(txreg, base + NvRegTxWatermark);
3279 writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
3280 base + NvRegMisc1);
3281 pci_push(base);
3282 writel(np->linkspeed, base + NvRegLinkSpeed);
3283 pci_push(base);
3285 pause_flags = 0;
3286 /* setup pause frame */
3287 if (np->duplex != 0) {
3288 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
3289 adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM);
3290 lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);
3292 switch (adv_pause) {
3293 case ADVERTISE_PAUSE_CAP:
3294 if (lpa_pause & LPA_PAUSE_CAP) {
3295 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3296 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3297 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3299 break;
3300 case ADVERTISE_PAUSE_ASYM:
3301 if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
3303 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3305 break;
3306 case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM:
3307 if (lpa_pause & LPA_PAUSE_CAP)
3309 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3310 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3311 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3313 if (lpa_pause == LPA_PAUSE_ASYM)
3315 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3317 break;
3319 } else {
3320 pause_flags = np->pause_flags;
3323 nv_update_pause(dev, pause_flags);
3325 if (txrxFlags & NV_RESTART_TX)
3326 nv_start_tx(dev);
3327 if (txrxFlags & NV_RESTART_RX)
3328 nv_start_rx(dev);
3330 return retval;
3333 static void nv_linkchange(struct net_device *dev)
3335 if (nv_update_linkspeed(dev)) {
3336 if (!netif_carrier_ok(dev)) {
3337 netif_carrier_on(dev);
3338 printk(KERN_INFO "%s: link up.\n", dev->name);
3339 nv_start_rx(dev);
3341 } else {
3342 if (netif_carrier_ok(dev)) {
3343 netif_carrier_off(dev);
3344 printk(KERN_INFO "%s: link down.\n", dev->name);
3345 nv_stop_rx(dev);
3350 static void nv_link_irq(struct net_device *dev)
3352 u8 __iomem *base = get_hwbase(dev);
3353 u32 miistat;
3355 miistat = readl(base + NvRegMIIStatus);
3356 writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
3357 dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);
3359 if (miistat & (NVREG_MIISTAT_LINKCHANGE))
3360 nv_linkchange(dev);
3361 dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
3364 static void nv_msi_workaround(struct fe_priv *np)
3367 /* Need to toggle the msi irq mask within the ethernet device,
3368 * otherwise, future interrupts will not be detected.
3370 if (np->msi_flags & NV_MSI_ENABLED) {
3371 u8 __iomem *base = np->base;
3373 writel(0, base + NvRegMSIIrqMask);
3374 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
3378 static irqreturn_t nv_nic_irq(int foo, void *data)
3380 struct net_device *dev = (struct net_device *) data;
3381 struct fe_priv *np = netdev_priv(dev);
3382 u8 __iomem *base = get_hwbase(dev);
3383 u32 events;
3384 int i;
3386 dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
3388 for (i=0; ; i++) {
3389 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3390 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3391 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
3392 } else {
3393 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3394 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
3396 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3397 if (!(events & np->irqmask))
3398 break;
3400 nv_msi_workaround(np);
3402 spin_lock(&np->lock);
3403 nv_tx_done(dev);
3404 spin_unlock(&np->lock);
3406 #ifdef CONFIG_FORCEDETH_NAPI
3407 if (events & NVREG_IRQ_RX_ALL) {
3408 netif_rx_schedule(dev, &np->napi);
3410 /* Disable furthur receive irq's */
3411 spin_lock(&np->lock);
3412 np->irqmask &= ~NVREG_IRQ_RX_ALL;
3414 if (np->msi_flags & NV_MSI_X_ENABLED)
3415 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3416 else
3417 writel(np->irqmask, base + NvRegIrqMask);
3418 spin_unlock(&np->lock);
3420 #else
3421 if (nv_rx_process(dev, RX_WORK_PER_LOOP)) {
3422 if (unlikely(nv_alloc_rx(dev))) {
3423 spin_lock(&np->lock);
3424 if (!np->in_shutdown)
3425 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3426 spin_unlock(&np->lock);
3429 #endif
3430 if (unlikely(events & NVREG_IRQ_LINK)) {
3431 spin_lock(&np->lock);
3432 nv_link_irq(dev);
3433 spin_unlock(&np->lock);
3435 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
3436 spin_lock(&np->lock);
3437 nv_linkchange(dev);
3438 spin_unlock(&np->lock);
3439 np->link_timeout = jiffies + LINK_TIMEOUT;
3441 if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
3442 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
3443 dev->name, events);
3445 if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
3446 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
3447 dev->name, events);
3449 if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
3450 spin_lock(&np->lock);
3451 /* disable interrupts on the nic */
3452 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3453 writel(0, base + NvRegIrqMask);
3454 else
3455 writel(np->irqmask, base + NvRegIrqMask);
3456 pci_push(base);
3458 if (!np->in_shutdown) {
3459 np->nic_poll_irq = np->irqmask;
3460 np->recover_error = 1;
3461 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3463 spin_unlock(&np->lock);
3464 break;
3466 if (unlikely(i > max_interrupt_work)) {
3467 spin_lock(&np->lock);
3468 /* disable interrupts on the nic */
3469 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3470 writel(0, base + NvRegIrqMask);
3471 else
3472 writel(np->irqmask, base + NvRegIrqMask);
3473 pci_push(base);
3475 if (!np->in_shutdown) {
3476 np->nic_poll_irq = np->irqmask;
3477 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3479 spin_unlock(&np->lock);
3480 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
3481 break;
3485 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
3487 return IRQ_RETVAL(i);
3491 * All _optimized functions are used to help increase performance
3492 * (reduce CPU and increase throughput). They use descripter version 3,
3493 * compiler directives, and reduce memory accesses.
3495 static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3497 struct net_device *dev = (struct net_device *) data;
3498 struct fe_priv *np = netdev_priv(dev);
3499 u8 __iomem *base = get_hwbase(dev);
3500 u32 events;
3501 int i;
3503 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);
3505 for (i=0; ; i++) {
3506 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3507 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3508 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
3509 } else {
3510 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3511 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
3513 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3514 if (!(events & np->irqmask))
3515 break;
3517 nv_msi_workaround(np);
3519 spin_lock(&np->lock);
3520 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3521 spin_unlock(&np->lock);
3523 #ifdef CONFIG_FORCEDETH_NAPI
3524 if (events & NVREG_IRQ_RX_ALL) {
3525 netif_rx_schedule(dev, &np->napi);
3527 /* Disable furthur receive irq's */
3528 spin_lock(&np->lock);
3529 np->irqmask &= ~NVREG_IRQ_RX_ALL;
3531 if (np->msi_flags & NV_MSI_X_ENABLED)
3532 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3533 else
3534 writel(np->irqmask, base + NvRegIrqMask);
3535 spin_unlock(&np->lock);
3537 #else
3538 if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
3539 if (unlikely(nv_alloc_rx_optimized(dev))) {
3540 spin_lock(&np->lock);
3541 if (!np->in_shutdown)
3542 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3543 spin_unlock(&np->lock);
3546 #endif
3547 if (unlikely(events & NVREG_IRQ_LINK)) {
3548 spin_lock(&np->lock);
3549 nv_link_irq(dev);
3550 spin_unlock(&np->lock);
3552 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
3553 spin_lock(&np->lock);
3554 nv_linkchange(dev);
3555 spin_unlock(&np->lock);
3556 np->link_timeout = jiffies + LINK_TIMEOUT;
3558 if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
3559 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
3560 dev->name, events);
3562 if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
3563 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
3564 dev->name, events);
3566 if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
3567 spin_lock(&np->lock);
3568 /* disable interrupts on the nic */
3569 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3570 writel(0, base + NvRegIrqMask);
3571 else
3572 writel(np->irqmask, base + NvRegIrqMask);
3573 pci_push(base);
3575 if (!np->in_shutdown) {
3576 np->nic_poll_irq = np->irqmask;
3577 np->recover_error = 1;
3578 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3580 spin_unlock(&np->lock);
3581 break;
3584 if (unlikely(i > max_interrupt_work)) {
3585 spin_lock(&np->lock);
3586 /* disable interrupts on the nic */
3587 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3588 writel(0, base + NvRegIrqMask);
3589 else
3590 writel(np->irqmask, base + NvRegIrqMask);
3591 pci_push(base);
3593 if (!np->in_shutdown) {
3594 np->nic_poll_irq = np->irqmask;
3595 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3597 spin_unlock(&np->lock);
3598 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
3599 break;
3603 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
3605 return IRQ_RETVAL(i);
3608 static irqreturn_t nv_nic_irq_tx(int foo, void *data)
3610 struct net_device *dev = (struct net_device *) data;
3611 struct fe_priv *np = netdev_priv(dev);
3612 u8 __iomem *base = get_hwbase(dev);
3613 u32 events;
3614 int i;
3615 unsigned long flags;
3617 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
3619 for (i=0; ; i++) {
3620 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
3621 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
3622 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
3623 if (!(events & np->irqmask))
3624 break;
3626 spin_lock_irqsave(&np->lock, flags);
3627 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3628 spin_unlock_irqrestore(&np->lock, flags);
3630 if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
3631 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
3632 dev->name, events);
3634 if (unlikely(i > max_interrupt_work)) {
3635 spin_lock_irqsave(&np->lock, flags);
3636 /* disable interrupts on the nic */
3637 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
3638 pci_push(base);
3640 if (!np->in_shutdown) {
3641 np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
3642 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3644 spin_unlock_irqrestore(&np->lock, flags);
3645 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
3646 break;
3650 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);
3652 return IRQ_RETVAL(i);
3655 #ifdef CONFIG_FORCEDETH_NAPI
3656 static int nv_napi_poll(struct napi_struct *napi, int budget)
3658 struct fe_priv *np = container_of(napi, struct fe_priv, napi);
3659 struct net_device *dev = np->dev;
3660 u8 __iomem *base = get_hwbase(dev);
3661 unsigned long flags;
3662 int pkts, retcode;
3664 if (!nv_optimized(np)) {
3665 pkts = nv_rx_process(dev, budget);
3666 retcode = nv_alloc_rx(dev);
3667 } else {
3668 pkts = nv_rx_process_optimized(dev, budget);
3669 retcode = nv_alloc_rx_optimized(dev);
3672 if (retcode) {
3673 spin_lock_irqsave(&np->lock, flags);
3674 if (!np->in_shutdown)
3675 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3676 spin_unlock_irqrestore(&np->lock, flags);
3679 if (pkts < budget) {
3680 /* re-enable receive interrupts */
3681 spin_lock_irqsave(&np->lock, flags);
3683 __netif_rx_complete(dev, napi);
3685 np->irqmask |= NVREG_IRQ_RX_ALL;
3686 if (np->msi_flags & NV_MSI_X_ENABLED)
3687 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3688 else
3689 writel(np->irqmask, base + NvRegIrqMask);
3691 spin_unlock_irqrestore(&np->lock, flags);
3693 return pkts;
3695 #endif
3697 #ifdef CONFIG_FORCEDETH_NAPI
3698 static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3700 struct net_device *dev = (struct net_device *) data;
3701 struct fe_priv *np = netdev_priv(dev);
3702 u8 __iomem *base = get_hwbase(dev);
3703 u32 events;
3705 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3706 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
3708 if (events) {
3709 netif_rx_schedule(dev, &np->napi);
3710 /* disable receive interrupts on the nic */
3711 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3712 pci_push(base);
3714 return IRQ_HANDLED;
3716 #else
3717 static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3719 struct net_device *dev = (struct net_device *) data;
3720 struct fe_priv *np = netdev_priv(dev);
3721 u8 __iomem *base = get_hwbase(dev);
3722 u32 events;
3723 int i;
3724 unsigned long flags;
3726 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
3728 for (i=0; ; i++) {
3729 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3730 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
3731 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
3732 if (!(events & np->irqmask))
3733 break;
3735 if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
3736 if (unlikely(nv_alloc_rx_optimized(dev))) {
3737 spin_lock_irqsave(&np->lock, flags);
3738 if (!np->in_shutdown)
3739 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3740 spin_unlock_irqrestore(&np->lock, flags);
3744 if (unlikely(i > max_interrupt_work)) {
3745 spin_lock_irqsave(&np->lock, flags);
3746 /* disable interrupts on the nic */
3747 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3748 pci_push(base);
3750 if (!np->in_shutdown) {
3751 np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
3752 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3754 spin_unlock_irqrestore(&np->lock, flags);
3755 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
3756 break;
3759 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);
3761 return IRQ_RETVAL(i);
3763 #endif
3765 static irqreturn_t nv_nic_irq_other(int foo, void *data)
3767 struct net_device *dev = (struct net_device *) data;
3768 struct fe_priv *np = netdev_priv(dev);
3769 u8 __iomem *base = get_hwbase(dev);
3770 u32 events;
3771 int i;
3772 unsigned long flags;
3774 dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
3776 for (i=0; ; i++) {
3777 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
3778 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
3779 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3780 if (!(events & np->irqmask))
3781 break;
3783 /* check tx in case we reached max loop limit in tx isr */
3784 spin_lock_irqsave(&np->lock, flags);
3785 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3786 spin_unlock_irqrestore(&np->lock, flags);
3788 if (events & NVREG_IRQ_LINK) {
3789 spin_lock_irqsave(&np->lock, flags);
3790 nv_link_irq(dev);
3791 spin_unlock_irqrestore(&np->lock, flags);
3793 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
3794 spin_lock_irqsave(&np->lock, flags);
3795 nv_linkchange(dev);
3796 spin_unlock_irqrestore(&np->lock, flags);
3797 np->link_timeout = jiffies + LINK_TIMEOUT;
3799 if (events & NVREG_IRQ_RECOVER_ERROR) {
3800 spin_lock_irq(&np->lock);
3801 /* disable interrupts on the nic */
3802 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3803 pci_push(base);
3805 if (!np->in_shutdown) {
3806 np->nic_poll_irq |= NVREG_IRQ_OTHER;
3807 np->recover_error = 1;
3808 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3810 spin_unlock_irq(&np->lock);
3811 break;
3813 if (events & (NVREG_IRQ_UNKNOWN)) {
3814 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
3815 dev->name, events);
3817 if (unlikely(i > max_interrupt_work)) {
3818 spin_lock_irqsave(&np->lock, flags);
3819 /* disable interrupts on the nic */
3820 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3821 pci_push(base);
3823 if (!np->in_shutdown) {
3824 np->nic_poll_irq |= NVREG_IRQ_OTHER;
3825 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3827 spin_unlock_irqrestore(&np->lock, flags);
3828 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
3829 break;
3833 dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);
3835 return IRQ_RETVAL(i);
3838 static irqreturn_t nv_nic_irq_test(int foo, void *data)
3840 struct net_device *dev = (struct net_device *) data;
3841 struct fe_priv *np = netdev_priv(dev);
3842 u8 __iomem *base = get_hwbase(dev);
3843 u32 events;
3845 dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);
3847 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3848 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3849 writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
3850 } else {
3851 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3852 writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
3854 pci_push(base);
3855 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3856 if (!(events & NVREG_IRQ_TIMER))
3857 return IRQ_RETVAL(0);
3859 nv_msi_workaround(np);
3861 spin_lock(&np->lock);
3862 np->intr_test = 1;
3863 spin_unlock(&np->lock);
3865 dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);
3867 return IRQ_RETVAL(1);
3870 static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
3872 u8 __iomem *base = get_hwbase(dev);
3873 int i;
3874 u32 msixmap = 0;
3876 /* Each interrupt bit can be mapped to a MSIX vector (4 bits).
3877 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
3878 * the remaining 8 interrupts.
3880 for (i = 0; i < 8; i++) {
3881 if ((irqmask >> i) & 0x1) {
3882 msixmap |= vector << (i << 2);
3885 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
3887 msixmap = 0;
3888 for (i = 0; i < 8; i++) {
3889 if ((irqmask >> (i + 8)) & 0x1) {
3890 msixmap |= vector << (i << 2);
3893 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
3896 static int nv_request_irq(struct net_device *dev, int intr_test)
3898 struct fe_priv *np = get_nvpriv(dev);
3899 u8 __iomem *base = get_hwbase(dev);
3900 int ret = 1;
3901 int i;
3902 irqreturn_t (*handler)(int foo, void *data);
3904 if (intr_test) {
3905 handler = nv_nic_irq_test;
3906 } else {
3907 if (nv_optimized(np))
3908 handler = nv_nic_irq_optimized;
3909 else
3910 handler = nv_nic_irq;
3913 if (np->msi_flags & NV_MSI_X_CAPABLE) {
3914 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
3915 np->msi_x_entry[i].entry = i;
3917 if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
3918 np->msi_flags |= NV_MSI_X_ENABLED;
3919 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
3920 /* Request irq for rx handling */
3921 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQF_SHARED, dev->name, dev) != 0) {
3922 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
3923 pci_disable_msix(np->pci_dev);
3924 np->msi_flags &= ~NV_MSI_X_ENABLED;
3925 goto out_err;
3927 /* Request irq for tx handling */
3928 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQF_SHARED, dev->name, dev) != 0) {
3929 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
3930 pci_disable_msix(np->pci_dev);
3931 np->msi_flags &= ~NV_MSI_X_ENABLED;
3932 goto out_free_rx;
3934 /* Request irq for link and timer handling */
3935 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQF_SHARED, dev->name, dev) != 0) {
3936 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
3937 pci_disable_msix(np->pci_dev);
3938 np->msi_flags &= ~NV_MSI_X_ENABLED;
3939 goto out_free_tx;
3941 /* map interrupts to their respective vector */
3942 writel(0, base + NvRegMSIXMap0);
3943 writel(0, base + NvRegMSIXMap1);
3944 set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
3945 set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
3946 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
3947 } else {
3948 /* Request irq for all interrupts */
3949 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
3950 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
3951 pci_disable_msix(np->pci_dev);
3952 np->msi_flags &= ~NV_MSI_X_ENABLED;
3953 goto out_err;
3956 /* map interrupts to vector 0 */
3957 writel(0, base + NvRegMSIXMap0);
3958 writel(0, base + NvRegMSIXMap1);
3962 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
3963 if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
3964 np->msi_flags |= NV_MSI_ENABLED;
3965 dev->irq = np->pci_dev->irq;
3966 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
3967 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
3968 pci_disable_msi(np->pci_dev);
3969 np->msi_flags &= ~NV_MSI_ENABLED;
3970 dev->irq = np->pci_dev->irq;
3971 goto out_err;
3974 /* map interrupts to vector 0 */
3975 writel(0, base + NvRegMSIMap0);
3976 writel(0, base + NvRegMSIMap1);
3977 /* enable msi vector 0 */
3978 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
3981 if (ret != 0) {
3982 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
3983 goto out_err;
3987 return 0;
3988 out_free_tx:
3989 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
3990 out_free_rx:
3991 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
3992 out_err:
3993 return 1;
3996 static void nv_free_irq(struct net_device *dev)
3998 struct fe_priv *np = get_nvpriv(dev);
3999 int i;
4001 if (np->msi_flags & NV_MSI_X_ENABLED) {
4002 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
4003 free_irq(np->msi_x_entry[i].vector, dev);
4005 pci_disable_msix(np->pci_dev);
4006 np->msi_flags &= ~NV_MSI_X_ENABLED;
4007 } else {
4008 free_irq(np->pci_dev->irq, dev);
4009 if (np->msi_flags & NV_MSI_ENABLED) {
4010 pci_disable_msi(np->pci_dev);
4011 np->msi_flags &= ~NV_MSI_ENABLED;
4016 static void nv_do_nic_poll(unsigned long data)
4018 struct net_device *dev = (struct net_device *) data;
4019 struct fe_priv *np = netdev_priv(dev);
4020 u8 __iomem *base = get_hwbase(dev);
4021 u32 mask = 0;
4024 * First disable irq(s) and then
4025 * reenable interrupts on the nic, we have to do this before calling
4026 * nv_nic_irq because that may decide to do otherwise
4029 if (!using_multi_irqs(dev)) {
4030 if (np->msi_flags & NV_MSI_X_ENABLED)
4031 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
4032 else
4033 disable_irq_lockdep(np->pci_dev->irq);
4034 mask = np->irqmask;
4035 } else {
4036 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
4037 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
4038 mask |= NVREG_IRQ_RX_ALL;
4040 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
4041 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
4042 mask |= NVREG_IRQ_TX_ALL;
4044 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
4045 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
4046 mask |= NVREG_IRQ_OTHER;
4049 np->nic_poll_irq = 0;
4051 /* disable_irq() contains synchronize_irq, thus no irq handler can run now */
4053 if (np->recover_error) {
4054 np->recover_error = 0;
4055 printk(KERN_INFO "forcedeth: MAC in recoverable error state\n");
4056 if (netif_running(dev)) {
4057 netif_tx_lock_bh(dev);
4058 netif_addr_lock(dev);
4059 spin_lock(&np->lock);
4060 /* stop engines */
4061 nv_stop_rxtx(dev);
4062 nv_txrx_reset(dev);
4063 /* drain rx queue */
4064 nv_drain_rxtx(dev);
4065 /* reinit driver view of the rx queue */
4066 set_bufsize(dev);
4067 if (nv_init_ring(dev)) {
4068 if (!np->in_shutdown)
4069 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4071 /* reinit nic view of the rx queue */
4072 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4073 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4074 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4075 base + NvRegRingSizes);
4076 pci_push(base);
4077 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4078 pci_push(base);
4080 /* restart rx engine */
4081 nv_start_rxtx(dev);
4082 spin_unlock(&np->lock);
4083 netif_addr_unlock(dev);
4084 netif_tx_unlock_bh(dev);
4089 writel(mask, base + NvRegIrqMask);
4090 pci_push(base);
4092 if (!using_multi_irqs(dev)) {
4093 if (nv_optimized(np))
4094 nv_nic_irq_optimized(0, dev);
4095 else
4096 nv_nic_irq(0, dev);
4097 if (np->msi_flags & NV_MSI_X_ENABLED)
4098 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
4099 else
4100 enable_irq_lockdep(np->pci_dev->irq);
4101 } else {
4102 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
4103 nv_nic_irq_rx(0, dev);
4104 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
4106 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
4107 nv_nic_irq_tx(0, dev);
4108 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
4110 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
4111 nv_nic_irq_other(0, dev);
4112 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
4117 #ifdef CONFIG_NET_POLL_CONTROLLER
4118 static void nv_poll_controller(struct net_device *dev)
4120 nv_do_nic_poll((unsigned long) dev);
4122 #endif
4124 static void nv_do_stats_poll(unsigned long data)
4126 struct net_device *dev = (struct net_device *) data;
4127 struct fe_priv *np = netdev_priv(dev);
4129 nv_get_hw_stats(dev);
4131 if (!np->in_shutdown)
4132 mod_timer(&np->stats_poll,
4133 round_jiffies(jiffies + STATS_INTERVAL));
4136 static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4138 struct fe_priv *np = netdev_priv(dev);
4139 strcpy(info->driver, DRV_NAME);
4140 strcpy(info->version, FORCEDETH_VERSION);
4141 strcpy(info->bus_info, pci_name(np->pci_dev));
4144 static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
4146 struct fe_priv *np = netdev_priv(dev);
4147 wolinfo->supported = WAKE_MAGIC;
4149 spin_lock_irq(&np->lock);
4150 if (np->wolenabled)
4151 wolinfo->wolopts = WAKE_MAGIC;
4152 spin_unlock_irq(&np->lock);
4155 static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
4157 struct fe_priv *np = netdev_priv(dev);
4158 u8 __iomem *base = get_hwbase(dev);
4159 u32 flags = 0;
4161 if (wolinfo->wolopts == 0) {
4162 np->wolenabled = 0;
4163 } else if (wolinfo->wolopts & WAKE_MAGIC) {
4164 np->wolenabled = 1;
4165 flags = NVREG_WAKEUPFLAGS_ENABLE;
4167 if (netif_running(dev)) {
4168 spin_lock_irq(&np->lock);
4169 writel(flags, base + NvRegWakeUpFlags);
4170 spin_unlock_irq(&np->lock);
4172 return 0;
4175 static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4177 struct fe_priv *np = netdev_priv(dev);
4178 int adv;
4180 spin_lock_irq(&np->lock);
4181 ecmd->port = PORT_MII;
4182 if (!netif_running(dev)) {
4183 /* We do not track link speed / duplex setting if the
4184 * interface is disabled. Force a link check */
4185 if (nv_update_linkspeed(dev)) {
4186 if (!netif_carrier_ok(dev))
4187 netif_carrier_on(dev);
4188 } else {
4189 if (netif_carrier_ok(dev))
4190 netif_carrier_off(dev);
4194 if (netif_carrier_ok(dev)) {
4195 switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
4196 case NVREG_LINKSPEED_10:
4197 ecmd->speed = SPEED_10;
4198 break;
4199 case NVREG_LINKSPEED_100:
4200 ecmd->speed = SPEED_100;
4201 break;
4202 case NVREG_LINKSPEED_1000:
4203 ecmd->speed = SPEED_1000;
4204 break;
4206 ecmd->duplex = DUPLEX_HALF;
4207 if (np->duplex)
4208 ecmd->duplex = DUPLEX_FULL;
4209 } else {
4210 ecmd->speed = -1;
4211 ecmd->duplex = -1;
4214 ecmd->autoneg = np->autoneg;
4216 ecmd->advertising = ADVERTISED_MII;
4217 if (np->autoneg) {
4218 ecmd->advertising |= ADVERTISED_Autoneg;
4219 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4220 if (adv & ADVERTISE_10HALF)
4221 ecmd->advertising |= ADVERTISED_10baseT_Half;
4222 if (adv & ADVERTISE_10FULL)
4223 ecmd->advertising |= ADVERTISED_10baseT_Full;
4224 if (adv & ADVERTISE_100HALF)
4225 ecmd->advertising |= ADVERTISED_100baseT_Half;
4226 if (adv & ADVERTISE_100FULL)
4227 ecmd->advertising |= ADVERTISED_100baseT_Full;
4228 if (np->gigabit == PHY_GIGABIT) {
4229 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4230 if (adv & ADVERTISE_1000FULL)
4231 ecmd->advertising |= ADVERTISED_1000baseT_Full;
4234 ecmd->supported = (SUPPORTED_Autoneg |
4235 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
4236 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
4237 SUPPORTED_MII);
4238 if (np->gigabit == PHY_GIGABIT)
4239 ecmd->supported |= SUPPORTED_1000baseT_Full;
4241 ecmd->phy_address = np->phyaddr;
4242 ecmd->transceiver = XCVR_EXTERNAL;
4244 /* ignore maxtxpkt, maxrxpkt for now */
4245 spin_unlock_irq(&np->lock);
4246 return 0;
4249 static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4251 struct fe_priv *np = netdev_priv(dev);
4253 if (ecmd->port != PORT_MII)
4254 return -EINVAL;
4255 if (ecmd->transceiver != XCVR_EXTERNAL)
4256 return -EINVAL;
4257 if (ecmd->phy_address != np->phyaddr) {
4258 /* TODO: support switching between multiple phys. Should be
4259 * trivial, but not enabled due to lack of test hardware. */
4260 return -EINVAL;
4262 if (ecmd->autoneg == AUTONEG_ENABLE) {
4263 u32 mask;
4265 mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
4266 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
4267 if (np->gigabit == PHY_GIGABIT)
4268 mask |= ADVERTISED_1000baseT_Full;
4270 if ((ecmd->advertising & mask) == 0)
4271 return -EINVAL;
4273 } else if (ecmd->autoneg == AUTONEG_DISABLE) {
4274 /* Note: autonegotiation disable, speed 1000 intentionally
4275 * forbidden - noone should need that. */
4277 if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
4278 return -EINVAL;
4279 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
4280 return -EINVAL;
4281 } else {
4282 return -EINVAL;
4285 netif_carrier_off(dev);
4286 if (netif_running(dev)) {
4287 unsigned long flags;
4289 nv_disable_irq(dev);
4290 netif_tx_lock_bh(dev);
4291 netif_addr_lock(dev);
4292 /* with plain spinlock lockdep complains */
4293 spin_lock_irqsave(&np->lock, flags);
4294 /* stop engines */
4295 /* FIXME:
4296 * this can take some time, and interrupts are disabled
4297 * due to spin_lock_irqsave, but let's hope no daemon
4298 * is going to change the settings very often...
4299 * Worst case:
4300 * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX
4301 * + some minor delays, which is up to a second approximately
4303 nv_stop_rxtx(dev);
4304 spin_unlock_irqrestore(&np->lock, flags);
4305 netif_addr_unlock(dev);
4306 netif_tx_unlock_bh(dev);
4309 if (ecmd->autoneg == AUTONEG_ENABLE) {
4310 int adv, bmcr;
4312 np->autoneg = 1;
4314 /* advertise only what has been requested */
4315 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4316 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4317 if (ecmd->advertising & ADVERTISED_10baseT_Half)
4318 adv |= ADVERTISE_10HALF;
4319 if (ecmd->advertising & ADVERTISED_10baseT_Full)
4320 adv |= ADVERTISE_10FULL;
4321 if (ecmd->advertising & ADVERTISED_100baseT_Half)
4322 adv |= ADVERTISE_100HALF;
4323 if (ecmd->advertising & ADVERTISED_100baseT_Full)
4324 adv |= ADVERTISE_100FULL;
4325 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisments but disable tx pause */
4326 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4327 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4328 adv |= ADVERTISE_PAUSE_ASYM;
4329 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4331 if (np->gigabit == PHY_GIGABIT) {
4332 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4333 adv &= ~ADVERTISE_1000FULL;
4334 if (ecmd->advertising & ADVERTISED_1000baseT_Full)
4335 adv |= ADVERTISE_1000FULL;
4336 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4339 if (netif_running(dev))
4340 printk(KERN_INFO "%s: link down.\n", dev->name);
4341 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4342 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4343 bmcr |= BMCR_ANENABLE;
4344 /* reset the phy in order for settings to stick,
4345 * and cause autoneg to start */
4346 if (phy_reset(dev, bmcr)) {
4347 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
4348 return -EINVAL;
4350 } else {
4351 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4352 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4354 } else {
4355 int adv, bmcr;
4357 np->autoneg = 0;
4359 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4360 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4361 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
4362 adv |= ADVERTISE_10HALF;
4363 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
4364 adv |= ADVERTISE_10FULL;
4365 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
4366 adv |= ADVERTISE_100HALF;
4367 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
4368 adv |= ADVERTISE_100FULL;
4369 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4370 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisments but disable tx pause */
4371 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4372 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4374 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
4375 adv |= ADVERTISE_PAUSE_ASYM;
4376 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4378 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4379 np->fixed_mode = adv;
4381 if (np->gigabit == PHY_GIGABIT) {
4382 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4383 adv &= ~ADVERTISE_1000FULL;
4384 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4387 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4388 bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
4389 if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
4390 bmcr |= BMCR_FULLDPLX;
4391 if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
4392 bmcr |= BMCR_SPEED100;
4393 if (np->phy_oui == PHY_OUI_MARVELL) {
4394 /* reset the phy in order for forced mode settings to stick */
4395 if (phy_reset(dev, bmcr)) {
4396 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
4397 return -EINVAL;
4399 } else {
4400 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4401 if (netif_running(dev)) {
4402 /* Wait a bit and then reconfigure the nic. */
4403 udelay(10);
4404 nv_linkchange(dev);
4409 if (netif_running(dev)) {
4410 nv_start_rxtx(dev);
4411 nv_enable_irq(dev);
4414 return 0;
4417 #define FORCEDETH_REGS_VER 1
4419 static int nv_get_regs_len(struct net_device *dev)
4421 struct fe_priv *np = netdev_priv(dev);
4422 return np->register_size;
4425 static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
4427 struct fe_priv *np = netdev_priv(dev);
4428 u8 __iomem *base = get_hwbase(dev);
4429 u32 *rbuf = buf;
4430 int i;
4432 regs->version = FORCEDETH_REGS_VER;
4433 spin_lock_irq(&np->lock);
4434 for (i = 0;i <= np->register_size/sizeof(u32); i++)
4435 rbuf[i] = readl(base + i*sizeof(u32));
4436 spin_unlock_irq(&np->lock);
4439 static int nv_nway_reset(struct net_device *dev)
4441 struct fe_priv *np = netdev_priv(dev);
4442 int ret;
4444 if (np->autoneg) {
4445 int bmcr;
4447 netif_carrier_off(dev);
4448 if (netif_running(dev)) {
4449 nv_disable_irq(dev);
4450 netif_tx_lock_bh(dev);
4451 netif_addr_lock(dev);
4452 spin_lock(&np->lock);
4453 /* stop engines */
4454 nv_stop_rxtx(dev);
4455 spin_unlock(&np->lock);
4456 netif_addr_unlock(dev);
4457 netif_tx_unlock_bh(dev);
4458 printk(KERN_INFO "%s: link down.\n", dev->name);
4461 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4462 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4463 bmcr |= BMCR_ANENABLE;
4464 /* reset the phy in order for settings to stick*/
4465 if (phy_reset(dev, bmcr)) {
4466 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
4467 return -EINVAL;
4469 } else {
4470 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4471 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4474 if (netif_running(dev)) {
4475 nv_start_rxtx(dev);
4476 nv_enable_irq(dev);
4478 ret = 0;
4479 } else {
4480 ret = -EINVAL;
4483 return ret;
4486 static int nv_set_tso(struct net_device *dev, u32 value)
4488 struct fe_priv *np = netdev_priv(dev);
4490 if ((np->driver_data & DEV_HAS_CHECKSUM))
4491 return ethtool_op_set_tso(dev, value);
4492 else
4493 return -EOPNOTSUPP;
4496 static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
4498 struct fe_priv *np = netdev_priv(dev);
4500 ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4501 ring->rx_mini_max_pending = 0;
4502 ring->rx_jumbo_max_pending = 0;
4503 ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4505 ring->rx_pending = np->rx_ring_size;
4506 ring->rx_mini_pending = 0;
4507 ring->rx_jumbo_pending = 0;
4508 ring->tx_pending = np->tx_ring_size;
4511 static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
4513 struct fe_priv *np = netdev_priv(dev);
4514 u8 __iomem *base = get_hwbase(dev);
4515 u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
4516 dma_addr_t ring_addr;
4518 if (ring->rx_pending < RX_RING_MIN ||
4519 ring->tx_pending < TX_RING_MIN ||
4520 ring->rx_mini_pending != 0 ||
4521 ring->rx_jumbo_pending != 0 ||
4522 (np->desc_ver == DESC_VER_1 &&
4523 (ring->rx_pending > RING_MAX_DESC_VER_1 ||
4524 ring->tx_pending > RING_MAX_DESC_VER_1)) ||
4525 (np->desc_ver != DESC_VER_1 &&
4526 (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
4527 ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
4528 return -EINVAL;
4531 /* allocate new rings */
4532 if (!nv_optimized(np)) {
4533 rxtx_ring = pci_alloc_consistent(np->pci_dev,
4534 sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4535 &ring_addr);
4536 } else {
4537 rxtx_ring = pci_alloc_consistent(np->pci_dev,
4538 sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4539 &ring_addr);
4541 rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
4542 tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
4543 if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
4544 /* fall back to old rings */
4545 if (!nv_optimized(np)) {
4546 if (rxtx_ring)
4547 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4548 rxtx_ring, ring_addr);
4549 } else {
4550 if (rxtx_ring)
4551 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4552 rxtx_ring, ring_addr);
4554 if (rx_skbuff)
4555 kfree(rx_skbuff);
4556 if (tx_skbuff)
4557 kfree(tx_skbuff);
4558 goto exit;
4561 if (netif_running(dev)) {
4562 nv_disable_irq(dev);
4563 netif_tx_lock_bh(dev);
4564 netif_addr_lock(dev);
4565 spin_lock(&np->lock);
4566 /* stop engines */
4567 nv_stop_rxtx(dev);
4568 nv_txrx_reset(dev);
4569 /* drain queues */
4570 nv_drain_rxtx(dev);
4571 /* delete queues */
4572 free_rings(dev);
4575 /* set new values */
4576 np->rx_ring_size = ring->rx_pending;
4577 np->tx_ring_size = ring->tx_pending;
4579 if (!nv_optimized(np)) {
4580 np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
4581 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
4582 } else {
4583 np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
4584 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
4586 np->rx_skb = (struct nv_skb_map*)rx_skbuff;
4587 np->tx_skb = (struct nv_skb_map*)tx_skbuff;
4588 np->ring_addr = ring_addr;
4590 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
4591 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
4593 if (netif_running(dev)) {
4594 /* reinit driver view of the queues */
4595 set_bufsize(dev);
4596 if (nv_init_ring(dev)) {
4597 if (!np->in_shutdown)
4598 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4601 /* reinit nic view of the queues */
4602 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4603 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4604 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4605 base + NvRegRingSizes);
4606 pci_push(base);
4607 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4608 pci_push(base);
4610 /* restart engines */
4611 nv_start_rxtx(dev);
4612 spin_unlock(&np->lock);
4613 netif_addr_unlock(dev);
4614 netif_tx_unlock_bh(dev);
4615 nv_enable_irq(dev);
4617 return 0;
4618 exit:
4619 return -ENOMEM;
4622 static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
4624 struct fe_priv *np = netdev_priv(dev);
4626 pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
4627 pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
4628 pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
4631 static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
4633 struct fe_priv *np = netdev_priv(dev);
4634 int adv, bmcr;
4636 if ((!np->autoneg && np->duplex == 0) ||
4637 (np->autoneg && !pause->autoneg && np->duplex == 0)) {
4638 printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n",
4639 dev->name);
4640 return -EINVAL;
4642 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
4643 printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name);
4644 return -EINVAL;
4647 netif_carrier_off(dev);
4648 if (netif_running(dev)) {
4649 nv_disable_irq(dev);
4650 netif_tx_lock_bh(dev);
4651 netif_addr_lock(dev);
4652 spin_lock(&np->lock);
4653 /* stop engines */
4654 nv_stop_rxtx(dev);
4655 spin_unlock(&np->lock);
4656 netif_addr_unlock(dev);
4657 netif_tx_unlock_bh(dev);
4660 np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
4661 if (pause->rx_pause)
4662 np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
4663 if (pause->tx_pause)
4664 np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
4666 if (np->autoneg && pause->autoneg) {
4667 np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
4669 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4670 adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4671 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisments but disable tx pause */
4672 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4673 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4674 adv |= ADVERTISE_PAUSE_ASYM;
4675 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4677 if (netif_running(dev))
4678 printk(KERN_INFO "%s: link down.\n", dev->name);
4679 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4680 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4681 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4682 } else {
4683 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4684 if (pause->rx_pause)
4685 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4686 if (pause->tx_pause)
4687 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4689 if (!netif_running(dev))
4690 nv_update_linkspeed(dev);
4691 else
4692 nv_update_pause(dev, np->pause_flags);
4695 if (netif_running(dev)) {
4696 nv_start_rxtx(dev);
4697 nv_enable_irq(dev);
4699 return 0;
4702 static u32 nv_get_rx_csum(struct net_device *dev)
4704 struct fe_priv *np = netdev_priv(dev);
4705 return (np->rx_csum) != 0;
4708 static int nv_set_rx_csum(struct net_device *dev, u32 data)
4710 struct fe_priv *np = netdev_priv(dev);
4711 u8 __iomem *base = get_hwbase(dev);
4712 int retcode = 0;
4714 if (np->driver_data & DEV_HAS_CHECKSUM) {
4715 if (data) {
4716 np->rx_csum = 1;
4717 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
4718 } else {
4719 np->rx_csum = 0;
4720 /* vlan is dependent on rx checksum offload */
4721 if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
4722 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
4724 if (netif_running(dev)) {
4725 spin_lock_irq(&np->lock);
4726 writel(np->txrxctl_bits, base + NvRegTxRxControl);
4727 spin_unlock_irq(&np->lock);
4729 } else {
4730 return -EINVAL;
4733 return retcode;
4736 static int nv_set_tx_csum(struct net_device *dev, u32 data)
4738 struct fe_priv *np = netdev_priv(dev);
4740 if (np->driver_data & DEV_HAS_CHECKSUM)
4741 return ethtool_op_set_tx_hw_csum(dev, data);
4742 else
4743 return -EOPNOTSUPP;
4746 static int nv_set_sg(struct net_device *dev, u32 data)
4748 struct fe_priv *np = netdev_priv(dev);
4750 if (np->driver_data & DEV_HAS_CHECKSUM)
4751 return ethtool_op_set_sg(dev, data);
4752 else
4753 return -EOPNOTSUPP;
4756 static int nv_get_sset_count(struct net_device *dev, int sset)
4758 struct fe_priv *np = netdev_priv(dev);
4760 switch (sset) {
4761 case ETH_SS_TEST:
4762 if (np->driver_data & DEV_HAS_TEST_EXTENDED)
4763 return NV_TEST_COUNT_EXTENDED;
4764 else
4765 return NV_TEST_COUNT_BASE;
4766 case ETH_SS_STATS:
4767 if (np->driver_data & DEV_HAS_STATISTICS_V1)
4768 return NV_DEV_STATISTICS_V1_COUNT;
4769 else if (np->driver_data & DEV_HAS_STATISTICS_V2)
4770 return NV_DEV_STATISTICS_V2_COUNT;
4771 else if (np->driver_data & DEV_HAS_STATISTICS_V3)
4772 return NV_DEV_STATISTICS_V3_COUNT;
4773 else
4774 return 0;
4775 default:
4776 return -EOPNOTSUPP;
4780 static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
4782 struct fe_priv *np = netdev_priv(dev);
4784 /* update stats */
4785 nv_do_stats_poll((unsigned long)dev);
4787 memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
4790 static int nv_link_test(struct net_device *dev)
4792 struct fe_priv *np = netdev_priv(dev);
4793 int mii_status;
4795 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4796 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4798 /* check phy link status */
4799 if (!(mii_status & BMSR_LSTATUS))
4800 return 0;
4801 else
4802 return 1;
4805 static int nv_register_test(struct net_device *dev)
4807 u8 __iomem *base = get_hwbase(dev);
4808 int i = 0;
4809 u32 orig_read, new_read;
4811 do {
4812 orig_read = readl(base + nv_registers_test[i].reg);
4814 /* xor with mask to toggle bits */
4815 orig_read ^= nv_registers_test[i].mask;
4817 writel(orig_read, base + nv_registers_test[i].reg);
4819 new_read = readl(base + nv_registers_test[i].reg);
4821 if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
4822 return 0;
4824 /* restore original value */
4825 orig_read ^= nv_registers_test[i].mask;
4826 writel(orig_read, base + nv_registers_test[i].reg);
4828 } while (nv_registers_test[++i].reg != 0);
4830 return 1;
4833 static int nv_interrupt_test(struct net_device *dev)
4835 struct fe_priv *np = netdev_priv(dev);
4836 u8 __iomem *base = get_hwbase(dev);
4837 int ret = 1;
4838 int testcnt;
4839 u32 save_msi_flags, save_poll_interval = 0;
4841 if (netif_running(dev)) {
4842 /* free current irq */
4843 nv_free_irq(dev);
4844 save_poll_interval = readl(base+NvRegPollingInterval);
4847 /* flag to test interrupt handler */
4848 np->intr_test = 0;
4850 /* setup test irq */
4851 save_msi_flags = np->msi_flags;
4852 np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
4853 np->msi_flags |= 0x001; /* setup 1 vector */
4854 if (nv_request_irq(dev, 1))
4855 return 0;
4857 /* setup timer interrupt */
4858 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
4859 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
4861 nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);
4863 /* wait for at least one interrupt */
4864 msleep(100);
4866 spin_lock_irq(&np->lock);
4868 /* flag should be set within ISR */
4869 testcnt = np->intr_test;
4870 if (!testcnt)
4871 ret = 2;
4873 nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
4874 if (!(np->msi_flags & NV_MSI_X_ENABLED))
4875 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4876 else
4877 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4879 spin_unlock_irq(&np->lock);
4881 nv_free_irq(dev);
4883 np->msi_flags = save_msi_flags;
4885 if (netif_running(dev)) {
4886 writel(save_poll_interval, base + NvRegPollingInterval);
4887 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
4888 /* restore original irq */
4889 if (nv_request_irq(dev, 0))
4890 return 0;
4893 return ret;
4896 static int nv_loopback_test(struct net_device *dev)
4898 struct fe_priv *np = netdev_priv(dev);
4899 u8 __iomem *base = get_hwbase(dev);
4900 struct sk_buff *tx_skb, *rx_skb;
4901 dma_addr_t test_dma_addr;
4902 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
4903 u32 flags;
4904 int len, i, pkt_len;
4905 u8 *pkt_data;
4906 u32 filter_flags = 0;
4907 u32 misc1_flags = 0;
4908 int ret = 1;
4910 if (netif_running(dev)) {
4911 nv_disable_irq(dev);
4912 filter_flags = readl(base + NvRegPacketFilterFlags);
4913 misc1_flags = readl(base + NvRegMisc1);
4914 } else {
4915 nv_txrx_reset(dev);
4918 /* reinit driver view of the rx queue */
4919 set_bufsize(dev);
4920 nv_init_ring(dev);
4922 /* setup hardware for loopback */
4923 writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
4924 writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);
4926 /* reinit nic view of the rx queue */
4927 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4928 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4929 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4930 base + NvRegRingSizes);
4931 pci_push(base);
4933 /* restart rx engine */
4934 nv_start_rxtx(dev);
4936 /* setup packet for tx */
4937 pkt_len = ETH_DATA_LEN;
4938 tx_skb = dev_alloc_skb(pkt_len);
4939 if (!tx_skb) {
4940 printk(KERN_ERR "dev_alloc_skb() failed during loopback test"
4941 " of %s\n", dev->name);
4942 ret = 0;
4943 goto out;
4945 test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
4946 skb_tailroom(tx_skb),
4947 PCI_DMA_FROMDEVICE);
4948 pkt_data = skb_put(tx_skb, pkt_len);
4949 for (i = 0; i < pkt_len; i++)
4950 pkt_data[i] = (u8)(i & 0xff);
4952 if (!nv_optimized(np)) {
4953 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
4954 np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
4955 } else {
4956 np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr));
4957 np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr));
4958 np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
4960 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4961 pci_push(get_hwbase(dev));
4963 msleep(500);
4965 /* check for rx of the packet */
4966 if (!nv_optimized(np)) {
4967 flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
4968 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
4970 } else {
4971 flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
4972 len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
4975 if (flags & NV_RX_AVAIL) {
4976 ret = 0;
4977 } else if (np->desc_ver == DESC_VER_1) {
4978 if (flags & NV_RX_ERROR)
4979 ret = 0;
4980 } else {
4981 if (flags & NV_RX2_ERROR) {
4982 ret = 0;
4986 if (ret) {
4987 if (len != pkt_len) {
4988 ret = 0;
4989 dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
4990 dev->name, len, pkt_len);
4991 } else {
4992 rx_skb = np->rx_skb[0].skb;
4993 for (i = 0; i < pkt_len; i++) {
4994 if (rx_skb->data[i] != (u8)(i & 0xff)) {
4995 ret = 0;
4996 dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
4997 dev->name, i);
4998 break;
5002 } else {
5003 dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
5006 pci_unmap_page(np->pci_dev, test_dma_addr,
5007 (skb_end_pointer(tx_skb) - tx_skb->data),
5008 PCI_DMA_TODEVICE);
5009 dev_kfree_skb_any(tx_skb);
5010 out:
5011 /* stop engines */
5012 nv_stop_rxtx(dev);
5013 nv_txrx_reset(dev);
5014 /* drain rx queue */
5015 nv_drain_rxtx(dev);
5017 if (netif_running(dev)) {
5018 writel(misc1_flags, base + NvRegMisc1);
5019 writel(filter_flags, base + NvRegPacketFilterFlags);
5020 nv_enable_irq(dev);
5023 return ret;
5026 static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
5028 struct fe_priv *np = netdev_priv(dev);
5029 u8 __iomem *base = get_hwbase(dev);
5030 int result;
5031 memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64));
5033 if (!nv_link_test(dev)) {
5034 test->flags |= ETH_TEST_FL_FAILED;
5035 buffer[0] = 1;
5038 if (test->flags & ETH_TEST_FL_OFFLINE) {
5039 if (netif_running(dev)) {
5040 netif_stop_queue(dev);
5041 #ifdef CONFIG_FORCEDETH_NAPI
5042 napi_disable(&np->napi);
5043 #endif
5044 netif_tx_lock_bh(dev);
5045 netif_addr_lock(dev);
5046 spin_lock_irq(&np->lock);
5047 nv_disable_hw_interrupts(dev, np->irqmask);
5048 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
5049 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5050 } else {
5051 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
5053 /* stop engines */
5054 nv_stop_rxtx(dev);
5055 nv_txrx_reset(dev);
5056 /* drain rx queue */
5057 nv_drain_rxtx(dev);
5058 spin_unlock_irq(&np->lock);
5059 netif_addr_unlock(dev);
5060 netif_tx_unlock_bh(dev);
5063 if (!nv_register_test(dev)) {
5064 test->flags |= ETH_TEST_FL_FAILED;
5065 buffer[1] = 1;
5068 result = nv_interrupt_test(dev);
5069 if (result != 1) {
5070 test->flags |= ETH_TEST_FL_FAILED;
5071 buffer[2] = 1;
5073 if (result == 0) {
5074 /* bail out */
5075 return;
5078 if (!nv_loopback_test(dev)) {
5079 test->flags |= ETH_TEST_FL_FAILED;
5080 buffer[3] = 1;
5083 if (netif_running(dev)) {
5084 /* reinit driver view of the rx queue */
5085 set_bufsize(dev);
5086 if (nv_init_ring(dev)) {
5087 if (!np->in_shutdown)
5088 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5090 /* reinit nic view of the rx queue */
5091 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5092 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5093 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5094 base + NvRegRingSizes);
5095 pci_push(base);
5096 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
5097 pci_push(base);
5098 /* restart rx engine */
5099 nv_start_rxtx(dev);
5100 netif_start_queue(dev);
5101 #ifdef CONFIG_FORCEDETH_NAPI
5102 napi_enable(&np->napi);
5103 #endif
5104 nv_enable_hw_interrupts(dev, np->irqmask);
5109 static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
5111 switch (stringset) {
5112 case ETH_SS_STATS:
5113 memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str));
5114 break;
5115 case ETH_SS_TEST:
5116 memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str));
5117 break;
5121 static const struct ethtool_ops ops = {
5122 .get_drvinfo = nv_get_drvinfo,
5123 .get_link = ethtool_op_get_link,
5124 .get_wol = nv_get_wol,
5125 .set_wol = nv_set_wol,
5126 .get_settings = nv_get_settings,
5127 .set_settings = nv_set_settings,
5128 .get_regs_len = nv_get_regs_len,
5129 .get_regs = nv_get_regs,
5130 .nway_reset = nv_nway_reset,
5131 .set_tso = nv_set_tso,
5132 .get_ringparam = nv_get_ringparam,
5133 .set_ringparam = nv_set_ringparam,
5134 .get_pauseparam = nv_get_pauseparam,
5135 .set_pauseparam = nv_set_pauseparam,
5136 .get_rx_csum = nv_get_rx_csum,
5137 .set_rx_csum = nv_set_rx_csum,
5138 .set_tx_csum = nv_set_tx_csum,
5139 .set_sg = nv_set_sg,
5140 .get_strings = nv_get_strings,
5141 .get_ethtool_stats = nv_get_ethtool_stats,
5142 .get_sset_count = nv_get_sset_count,
5143 .self_test = nv_self_test,
5146 static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
5148 struct fe_priv *np = get_nvpriv(dev);
5150 spin_lock_irq(&np->lock);
5152 /* save vlan group */
5153 np->vlangrp = grp;
5155 if (grp) {
5156 /* enable vlan on MAC */
5157 np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
5158 } else {
5159 /* disable vlan on MAC */
5160 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
5161 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
5164 writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
5166 spin_unlock_irq(&np->lock);
5169 /* The mgmt unit and driver use a semaphore to access the phy during init */
5170 static int nv_mgmt_acquire_sema(struct net_device *dev)
5172 u8 __iomem *base = get_hwbase(dev);
5173 int i;
5174 u32 tx_ctrl, mgmt_sema;
5176 for (i = 0; i < 10; i++) {
5177 mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
5178 if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
5179 break;
5180 msleep(500);
5183 if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
5184 return 0;
5186 for (i = 0; i < 2; i++) {
5187 tx_ctrl = readl(base + NvRegTransmitterControl);
5188 tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
5189 writel(tx_ctrl, base + NvRegTransmitterControl);
5191 /* verify that semaphore was acquired */
5192 tx_ctrl = readl(base + NvRegTransmitterControl);
5193 if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
5194 ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE))
5195 return 1;
5196 else
5197 udelay(50);
5200 return 0;
5203 static int nv_open(struct net_device *dev)
5205 struct fe_priv *np = netdev_priv(dev);
5206 u8 __iomem *base = get_hwbase(dev);
5207 int ret = 1;
5208 int oom, i;
5209 u32 low;
5211 dprintk(KERN_DEBUG "nv_open: begin\n");
5213 /* erase previous misconfiguration */
5214 if (np->driver_data & DEV_HAS_POWER_CNTRL)
5215 nv_mac_reset(dev);
5216 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
5217 writel(0, base + NvRegMulticastAddrB);
5218 writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
5219 writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
5220 writel(0, base + NvRegPacketFilterFlags);
5222 writel(0, base + NvRegTransmitterControl);
5223 writel(0, base + NvRegReceiverControl);
5225 writel(0, base + NvRegAdapterControl);
5227 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
5228 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
5230 /* initialize descriptor rings */
5231 set_bufsize(dev);
5232 oom = nv_init_ring(dev);
5234 writel(0, base + NvRegLinkSpeed);
5235 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5236 nv_txrx_reset(dev);
5237 writel(0, base + NvRegUnknownSetupReg6);
5239 np->in_shutdown = 0;
5241 /* give hw rings */
5242 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5243 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5244 base + NvRegRingSizes);
5246 writel(np->linkspeed, base + NvRegLinkSpeed);
5247 if (np->desc_ver == DESC_VER_1)
5248 writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
5249 else
5250 writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
5251 writel(np->txrxctl_bits, base + NvRegTxRxControl);
5252 writel(np->vlanctl_bits, base + NvRegVlanControl);
5253 pci_push(base);
5254 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
5255 reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
5256 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
5257 KERN_INFO "open: SetupReg5, Bit 31 remained off\n");
5259 writel(0, base + NvRegMIIMask);
5260 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5261 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5263 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
5264 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
5265 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
5266 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5268 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
5270 get_random_bytes(&low, sizeof(low));
5271 low &= NVREG_SLOTTIME_MASK;
5272 if (np->desc_ver == DESC_VER_1) {
5273 writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime);
5274 } else {
5275 if (!(np->driver_data & DEV_HAS_GEAR_MODE)) {
5276 /* setup legacy backoff */
5277 writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime);
5278 } else {
5279 writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime);
5280 nv_gear_backoff_reseed(dev);
5283 writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
5284 writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
5285 if (poll_interval == -1) {
5286 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
5287 writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
5288 else
5289 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
5291 else
5292 writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
5293 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
5294 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
5295 base + NvRegAdapterControl);
5296 writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
5297 writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
5298 if (np->wolenabled)
5299 writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags);
5301 i = readl(base + NvRegPowerState);
5302 if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0)
5303 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
5305 pci_push(base);
5306 udelay(10);
5307 writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
5309 nv_disable_hw_interrupts(dev, np->irqmask);
5310 pci_push(base);
5311 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5312 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5313 pci_push(base);
5315 if (nv_request_irq(dev, 0)) {
5316 goto out_drain;
5319 /* ask for interrupts */
5320 nv_enable_hw_interrupts(dev, np->irqmask);
5322 spin_lock_irq(&np->lock);
5323 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
5324 writel(0, base + NvRegMulticastAddrB);
5325 writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
5326 writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
5327 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
5328 /* One manual link speed update: Interrupts are enabled, future link
5329 * speed changes cause interrupts and are handled by nv_link_irq().
5332 u32 miistat;
5333 miistat = readl(base + NvRegMIIStatus);
5334 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5335 dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
5337 /* set linkspeed to invalid value, thus force nv_update_linkspeed
5338 * to init hw */
5339 np->linkspeed = 0;
5340 ret = nv_update_linkspeed(dev);
5341 nv_start_rxtx(dev);
5342 netif_start_queue(dev);
5343 #ifdef CONFIG_FORCEDETH_NAPI
5344 napi_enable(&np->napi);
5345 #endif
5347 if (ret) {
5348 netif_carrier_on(dev);
5349 } else {
5350 printk(KERN_INFO "%s: no link during initialization.\n", dev->name);
5351 netif_carrier_off(dev);
5353 if (oom)
5354 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5356 /* start statistics timer */
5357 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
5358 mod_timer(&np->stats_poll,
5359 round_jiffies(jiffies + STATS_INTERVAL));
5361 spin_unlock_irq(&np->lock);
5363 return 0;
5364 out_drain:
5365 nv_drain_rxtx(dev);
5366 return ret;
5369 static int nv_close(struct net_device *dev)
5371 struct fe_priv *np = netdev_priv(dev);
5372 u8 __iomem *base;
5374 spin_lock_irq(&np->lock);
5375 np->in_shutdown = 1;
5376 spin_unlock_irq(&np->lock);
5377 #ifdef CONFIG_FORCEDETH_NAPI
5378 napi_disable(&np->napi);
5379 #endif
5380 synchronize_irq(np->pci_dev->irq);
5382 del_timer_sync(&np->oom_kick);
5383 del_timer_sync(&np->nic_poll);
5384 del_timer_sync(&np->stats_poll);
5386 netif_stop_queue(dev);
5387 spin_lock_irq(&np->lock);
5388 nv_stop_rxtx(dev);
5389 nv_txrx_reset(dev);
5391 /* disable interrupts on the nic or we will lock up */
5392 base = get_hwbase(dev);
5393 nv_disable_hw_interrupts(dev, np->irqmask);
5394 pci_push(base);
5395 dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
5397 spin_unlock_irq(&np->lock);
5399 nv_free_irq(dev);
5401 nv_drain_rxtx(dev);
5403 if (np->wolenabled) {
5404 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
5405 nv_start_rx(dev);
5408 /* FIXME: power down nic */
5410 return 0;
5413 static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
5415 struct net_device *dev;
5416 struct fe_priv *np;
5417 unsigned long addr;
5418 u8 __iomem *base;
5419 int err, i;
5420 u32 powerstate, txreg;
5421 u32 phystate_orig = 0, phystate;
5422 int phyinitialized = 0;
5423 DECLARE_MAC_BUF(mac);
5424 static int printed_version;
5426 if (!printed_version++)
5427 printk(KERN_INFO "%s: Reverse Engineered nForce ethernet"
5428 " driver. Version %s.\n", DRV_NAME, FORCEDETH_VERSION);
5430 dev = alloc_etherdev(sizeof(struct fe_priv));
5431 err = -ENOMEM;
5432 if (!dev)
5433 goto out;
5435 np = netdev_priv(dev);
5436 np->dev = dev;
5437 np->pci_dev = pci_dev;
5438 spin_lock_init(&np->lock);
5439 SET_NETDEV_DEV(dev, &pci_dev->dev);
5441 init_timer(&np->oom_kick);
5442 np->oom_kick.data = (unsigned long) dev;
5443 np->oom_kick.function = &nv_do_rx_refill; /* timer handler */
5444 init_timer(&np->nic_poll);
5445 np->nic_poll.data = (unsigned long) dev;
5446 np->nic_poll.function = &nv_do_nic_poll; /* timer handler */
5447 init_timer(&np->stats_poll);
5448 np->stats_poll.data = (unsigned long) dev;
5449 np->stats_poll.function = &nv_do_stats_poll; /* timer handler */
5451 err = pci_enable_device(pci_dev);
5452 if (err)
5453 goto out_free;
5455 pci_set_master(pci_dev);
5457 err = pci_request_regions(pci_dev, DRV_NAME);
5458 if (err < 0)
5459 goto out_disable;
5461 if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
5462 np->register_size = NV_PCI_REGSZ_VER3;
5463 else if (id->driver_data & DEV_HAS_STATISTICS_V1)
5464 np->register_size = NV_PCI_REGSZ_VER2;
5465 else
5466 np->register_size = NV_PCI_REGSZ_VER1;
5468 err = -EINVAL;
5469 addr = 0;
5470 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
5471 dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
5472 pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
5473 pci_resource_len(pci_dev, i),
5474 pci_resource_flags(pci_dev, i));
5475 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
5476 pci_resource_len(pci_dev, i) >= np->register_size) {
5477 addr = pci_resource_start(pci_dev, i);
5478 break;
5481 if (i == DEVICE_COUNT_RESOURCE) {
5482 dev_printk(KERN_INFO, &pci_dev->dev,
5483 "Couldn't find register window\n");
5484 goto out_relreg;
5487 /* copy of driver data */
5488 np->driver_data = id->driver_data;
5489 /* copy of device id */
5490 np->device_id = id->device;
5492 /* handle different descriptor versions */
5493 if (id->driver_data & DEV_HAS_HIGH_DMA) {
5494 /* packet format 3: supports 40-bit addressing */
5495 np->desc_ver = DESC_VER_3;
5496 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
5497 if (dma_64bit) {
5498 if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK))
5499 dev_printk(KERN_INFO, &pci_dev->dev,
5500 "64-bit DMA failed, using 32-bit addressing\n");
5501 else
5502 dev->features |= NETIF_F_HIGHDMA;
5503 if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) {
5504 dev_printk(KERN_INFO, &pci_dev->dev,
5505 "64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
5508 } else if (id->driver_data & DEV_HAS_LARGEDESC) {
5509 /* packet format 2: supports jumbo frames */
5510 np->desc_ver = DESC_VER_2;
5511 np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
5512 } else {
5513 /* original packet format */
5514 np->desc_ver = DESC_VER_1;
5515 np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
5518 np->pkt_limit = NV_PKTLIMIT_1;
5519 if (id->driver_data & DEV_HAS_LARGEDESC)
5520 np->pkt_limit = NV_PKTLIMIT_2;
5522 if (id->driver_data & DEV_HAS_CHECKSUM) {
5523 np->rx_csum = 1;
5524 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
5525 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
5526 dev->features |= NETIF_F_TSO;
5529 np->vlanctl_bits = 0;
5530 if (id->driver_data & DEV_HAS_VLAN) {
5531 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
5532 dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
5533 dev->vlan_rx_register = nv_vlan_rx_register;
5536 np->msi_flags = 0;
5537 if ((id->driver_data & DEV_HAS_MSI) && msi) {
5538 np->msi_flags |= NV_MSI_CAPABLE;
5540 if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
5541 np->msi_flags |= NV_MSI_X_CAPABLE;
5544 np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
5545 if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
5546 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
5547 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
5548 np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
5552 err = -ENOMEM;
5553 np->base = ioremap(addr, np->register_size);
5554 if (!np->base)
5555 goto out_relreg;
5556 dev->base_addr = (unsigned long)np->base;
5558 dev->irq = pci_dev->irq;
5560 np->rx_ring_size = RX_RING_DEFAULT;
5561 np->tx_ring_size = TX_RING_DEFAULT;
5563 if (!nv_optimized(np)) {
5564 np->rx_ring.orig = pci_alloc_consistent(pci_dev,
5565 sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
5566 &np->ring_addr);
5567 if (!np->rx_ring.orig)
5568 goto out_unmap;
5569 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
5570 } else {
5571 np->rx_ring.ex = pci_alloc_consistent(pci_dev,
5572 sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
5573 &np->ring_addr);
5574 if (!np->rx_ring.ex)
5575 goto out_unmap;
5576 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
5578 np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
5579 np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
5580 if (!np->rx_skb || !np->tx_skb)
5581 goto out_freering;
5583 dev->open = nv_open;
5584 dev->stop = nv_close;
5586 if (!nv_optimized(np))
5587 dev->hard_start_xmit = nv_start_xmit;
5588 else
5589 dev->hard_start_xmit = nv_start_xmit_optimized;
5590 dev->get_stats = nv_get_stats;
5591 dev->change_mtu = nv_change_mtu;
5592 dev->set_mac_address = nv_set_mac_address;
5593 dev->set_multicast_list = nv_set_multicast;
5594 #ifdef CONFIG_NET_POLL_CONTROLLER
5595 dev->poll_controller = nv_poll_controller;
5596 #endif
5597 #ifdef CONFIG_FORCEDETH_NAPI
5598 netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
5599 #endif
5600 SET_ETHTOOL_OPS(dev, &ops);
5601 dev->tx_timeout = nv_tx_timeout;
5602 dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
5604 pci_set_drvdata(pci_dev, dev);
5606 /* read the mac address */
5607 base = get_hwbase(dev);
5608 np->orig_mac[0] = readl(base + NvRegMacAddrA);
5609 np->orig_mac[1] = readl(base + NvRegMacAddrB);
5611 /* check the workaround bit for correct mac address order */
5612 txreg = readl(base + NvRegTransmitPoll);
5613 if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
5614 /* mac address is already in correct order */
5615 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
5616 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
5617 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5618 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5619 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
5620 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
5621 } else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
5622 /* mac address is already in correct order */
5623 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
5624 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
5625 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5626 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5627 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
5628 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
5630 * Set orig mac address back to the reversed version.
5631 * This flag will be cleared during low power transition.
5632 * Therefore, we should always put back the reversed address.
5634 np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
5635 (dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24);
5636 np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
5637 } else {
5638 /* need to reverse mac address to correct order */
5639 dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
5640 dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
5641 dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
5642 dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
5643 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
5644 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
5645 writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5646 printk(KERN_DEBUG "nv_probe: set workaround bit for reversed mac addr\n");
5648 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
5650 if (!is_valid_ether_addr(dev->perm_addr)) {
5652 * Bad mac address. At least one bios sets the mac address
5653 * to 01:23:45:67:89:ab
5655 dev_printk(KERN_ERR, &pci_dev->dev,
5656 "Invalid Mac address detected: %s\n",
5657 print_mac(mac, dev->dev_addr));
5658 dev_printk(KERN_ERR, &pci_dev->dev,
5659 "Please complain to your hardware vendor. Switching to a random MAC.\n");
5660 dev->dev_addr[0] = 0x00;
5661 dev->dev_addr[1] = 0x00;
5662 dev->dev_addr[2] = 0x6c;
5663 get_random_bytes(&dev->dev_addr[3], 3);
5666 dprintk(KERN_DEBUG "%s: MAC Address %s\n",
5667 pci_name(pci_dev), print_mac(mac, dev->dev_addr));
5669 /* set mac address */
5670 nv_copy_mac_to_hw(dev);
5672 /* Workaround current PCI init glitch: wakeup bits aren't
5673 * being set from PCI PM capability.
5675 device_init_wakeup(&pci_dev->dev, 1);
5677 /* disable WOL */
5678 writel(0, base + NvRegWakeUpFlags);
5679 np->wolenabled = 0;
5681 if (id->driver_data & DEV_HAS_POWER_CNTRL) {
5683 /* take phy and nic out of low power mode */
5684 powerstate = readl(base + NvRegPowerState2);
5685 powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
5686 if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
5687 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
5688 pci_dev->revision >= 0xA3)
5689 powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
5690 writel(powerstate, base + NvRegPowerState2);
5693 if (np->desc_ver == DESC_VER_1) {
5694 np->tx_flags = NV_TX_VALID;
5695 } else {
5696 np->tx_flags = NV_TX2_VALID;
5698 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
5699 np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5700 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5701 np->msi_flags |= 0x0003;
5702 } else {
5703 np->irqmask = NVREG_IRQMASK_CPU;
5704 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5705 np->msi_flags |= 0x0001;
5708 if (id->driver_data & DEV_NEED_TIMERIRQ)
5709 np->irqmask |= NVREG_IRQ_TIMER;
5710 if (id->driver_data & DEV_NEED_LINKTIMER) {
5711 dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
5712 np->need_linktimer = 1;
5713 np->link_timeout = jiffies + LINK_TIMEOUT;
5714 } else {
5715 dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
5716 np->need_linktimer = 0;
5719 /* Limit the number of tx's outstanding for hw bug */
5720 if (id->driver_data & DEV_NEED_TX_LIMIT) {
5721 np->tx_limit = 1;
5722 if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
5723 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
5724 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
5725 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
5726 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
5727 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
5728 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
5729 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_39) &&
5730 pci_dev->revision >= 0xA2)
5731 np->tx_limit = 0;
5734 /* clear phy state and temporarily halt phy interrupts */
5735 writel(0, base + NvRegMIIMask);
5736 phystate = readl(base + NvRegAdapterControl);
5737 if (phystate & NVREG_ADAPTCTL_RUNNING) {
5738 phystate_orig = 1;
5739 phystate &= ~NVREG_ADAPTCTL_RUNNING;
5740 writel(phystate, base + NvRegAdapterControl);
5742 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5744 if (id->driver_data & DEV_HAS_MGMT_UNIT) {
5745 /* management unit running on the mac? */
5746 if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) {
5747 np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
5748 dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use);
5749 if (nv_mgmt_acquire_sema(dev)) {
5750 /* management unit setup the phy already? */
5751 if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
5752 NVREG_XMITCTL_SYNC_PHY_INIT) {
5753 /* phy is inited by mgmt unit */
5754 phyinitialized = 1;
5755 dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev));
5756 } else {
5757 /* we need to init the phy */
5763 /* find a suitable phy */
5764 for (i = 1; i <= 32; i++) {
5765 int id1, id2;
5766 int phyaddr = i & 0x1F;
5768 spin_lock_irq(&np->lock);
5769 id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
5770 spin_unlock_irq(&np->lock);
5771 if (id1 < 0 || id1 == 0xffff)
5772 continue;
5773 spin_lock_irq(&np->lock);
5774 id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
5775 spin_unlock_irq(&np->lock);
5776 if (id2 < 0 || id2 == 0xffff)
5777 continue;
5779 np->phy_model = id2 & PHYID2_MODEL_MASK;
5780 id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
5781 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
5782 dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
5783 pci_name(pci_dev), id1, id2, phyaddr);
5784 np->phyaddr = phyaddr;
5785 np->phy_oui = id1 | id2;
5787 /* Realtek hardcoded phy id1 to all zero's on certain phys */
5788 if (np->phy_oui == PHY_OUI_REALTEK2)
5789 np->phy_oui = PHY_OUI_REALTEK;
5790 /* Setup phy revision for Realtek */
5791 if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211)
5792 np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK;
5794 break;
5796 if (i == 33) {
5797 dev_printk(KERN_INFO, &pci_dev->dev,
5798 "open: Could not find a valid PHY.\n");
5799 goto out_error;
5802 if (!phyinitialized) {
5803 /* reset it */
5804 phy_init(dev);
5805 } else {
5806 /* see if it is a gigabit phy */
5807 u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
5808 if (mii_status & PHY_GIGABIT) {
5809 np->gigabit = PHY_GIGABIT;
5813 /* set default link speed settings */
5814 np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
5815 np->duplex = 0;
5816 np->autoneg = 1;
5818 err = register_netdev(dev);
5819 if (err) {
5820 dev_printk(KERN_INFO, &pci_dev->dev,
5821 "unable to register netdev: %d\n", err);
5822 goto out_error;
5825 dev_printk(KERN_INFO, &pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, "
5826 "addr %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
5827 dev->name,
5828 np->phy_oui,
5829 np->phyaddr,
5830 dev->dev_addr[0],
5831 dev->dev_addr[1],
5832 dev->dev_addr[2],
5833 dev->dev_addr[3],
5834 dev->dev_addr[4],
5835 dev->dev_addr[5]);
5837 dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
5838 dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
5839 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
5840 "csum " : "",
5841 dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
5842 "vlan " : "",
5843 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
5844 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
5845 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
5846 np->gigabit == PHY_GIGABIT ? "gbit " : "",
5847 np->need_linktimer ? "lnktim " : "",
5848 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
5849 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
5850 np->desc_ver);
5852 return 0;
5854 out_error:
5855 if (phystate_orig)
5856 writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
5857 pci_set_drvdata(pci_dev, NULL);
5858 out_freering:
5859 free_rings(dev);
5860 out_unmap:
5861 iounmap(get_hwbase(dev));
5862 out_relreg:
5863 pci_release_regions(pci_dev);
5864 out_disable:
5865 pci_disable_device(pci_dev);
5866 out_free:
5867 free_netdev(dev);
5868 out:
5869 return err;
5872 static void nv_restore_phy(struct net_device *dev)
5874 struct fe_priv *np = netdev_priv(dev);
5875 u16 phy_reserved, mii_control;
5877 if (np->phy_oui == PHY_OUI_REALTEK &&
5878 np->phy_model == PHY_MODEL_REALTEK_8201 &&
5879 phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
5880 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3);
5881 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
5882 phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
5883 phy_reserved |= PHY_REALTEK_INIT8;
5884 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved);
5885 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1);
5887 /* restart auto negotiation */
5888 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
5889 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
5890 mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
5894 static void nv_restore_mac_addr(struct pci_dev *pci_dev)
5896 struct net_device *dev = pci_get_drvdata(pci_dev);
5897 struct fe_priv *np = netdev_priv(dev);
5898 u8 __iomem *base = get_hwbase(dev);
5900 /* special op: write back the misordered MAC address - otherwise
5901 * the next nv_probe would see a wrong address.
5903 writel(np->orig_mac[0], base + NvRegMacAddrA);
5904 writel(np->orig_mac[1], base + NvRegMacAddrB);
5905 writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
5906 base + NvRegTransmitPoll);
5909 static void __devexit nv_remove(struct pci_dev *pci_dev)
5911 struct net_device *dev = pci_get_drvdata(pci_dev);
5913 unregister_netdev(dev);
5915 nv_restore_mac_addr(pci_dev);
5917 /* restore any phy related changes */
5918 nv_restore_phy(dev);
5920 /* free all structures */
5921 free_rings(dev);
5922 iounmap(get_hwbase(dev));
5923 pci_release_regions(pci_dev);
5924 pci_disable_device(pci_dev);
5925 free_netdev(dev);
5926 pci_set_drvdata(pci_dev, NULL);
5929 #ifdef CONFIG_PM
5930 static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
5932 struct net_device *dev = pci_get_drvdata(pdev);
5933 struct fe_priv *np = netdev_priv(dev);
5934 u8 __iomem *base = get_hwbase(dev);
5935 int i;
5937 if (netif_running(dev)) {
5938 // Gross.
5939 nv_close(dev);
5941 netif_device_detach(dev);
5943 /* save non-pci configuration space */
5944 for (i = 0;i <= np->register_size/sizeof(u32); i++)
5945 np->saved_config_space[i] = readl(base + i*sizeof(u32));
5947 pci_save_state(pdev);
5948 pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
5949 pci_disable_device(pdev);
5950 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5951 return 0;
5954 static int nv_resume(struct pci_dev *pdev)
5956 struct net_device *dev = pci_get_drvdata(pdev);
5957 struct fe_priv *np = netdev_priv(dev);
5958 u8 __iomem *base = get_hwbase(dev);
5959 int i, rc = 0;
5961 pci_set_power_state(pdev, PCI_D0);
5962 pci_restore_state(pdev);
5963 /* ack any pending wake events, disable PME */
5964 pci_enable_wake(pdev, PCI_D0, 0);
5966 /* restore non-pci configuration space */
5967 for (i = 0;i <= np->register_size/sizeof(u32); i++)
5968 writel(np->saved_config_space[i], base+i*sizeof(u32));
5970 netif_device_attach(dev);
5971 if (netif_running(dev)) {
5972 rc = nv_open(dev);
5973 nv_set_multicast(dev);
5975 return rc;
5978 static void nv_shutdown(struct pci_dev *pdev)
5980 struct net_device *dev = pci_get_drvdata(pdev);
5981 struct fe_priv *np = netdev_priv(dev);
5983 if (netif_running(dev))
5984 nv_close(dev);
5986 nv_restore_mac_addr(pdev);
5988 pci_disable_device(pdev);
5989 if (system_state == SYSTEM_POWER_OFF) {
5990 if (pci_enable_wake(pdev, PCI_D3cold, np->wolenabled))
5991 pci_enable_wake(pdev, PCI_D3hot, np->wolenabled);
5992 pci_set_power_state(pdev, PCI_D3hot);
5995 #else
5996 #define nv_suspend NULL
5997 #define nv_shutdown NULL
5998 #define nv_resume NULL
5999 #endif /* CONFIG_PM */
6001 static struct pci_device_id pci_tbl[] = {
6002 { /* nForce Ethernet Controller */
6003 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
6004 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
6006 { /* nForce2 Ethernet Controller */
6007 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
6008 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
6010 { /* nForce3 Ethernet Controller */
6011 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
6012 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
6014 { /* nForce3 Ethernet Controller */
6015 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
6016 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
6018 { /* nForce3 Ethernet Controller */
6019 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
6020 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
6022 { /* nForce3 Ethernet Controller */
6023 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
6024 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
6026 { /* nForce3 Ethernet Controller */
6027 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
6028 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
6030 { /* CK804 Ethernet Controller */
6031 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
6032 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
6034 { /* CK804 Ethernet Controller */
6035 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
6036 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
6038 { /* MCP04 Ethernet Controller */
6039 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
6040 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
6042 { /* MCP04 Ethernet Controller */
6043 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
6044 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
6046 { /* MCP51 Ethernet Controller */
6047 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
6048 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
6050 { /* MCP51 Ethernet Controller */
6051 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
6052 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
6054 { /* MCP55 Ethernet Controller */
6055 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
6056 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT,
6058 { /* MCP55 Ethernet Controller */
6059 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
6060 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT,
6062 { /* MCP61 Ethernet Controller */
6063 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
6064 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
6066 { /* MCP61 Ethernet Controller */
6067 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
6068 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
6070 { /* MCP61 Ethernet Controller */
6071 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
6072 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
6074 { /* MCP61 Ethernet Controller */
6075 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
6076 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
6078 { /* MCP65 Ethernet Controller */
6079 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
6080 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6082 { /* MCP65 Ethernet Controller */
6083 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
6084 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6086 { /* MCP65 Ethernet Controller */
6087 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
6088 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6090 { /* MCP65 Ethernet Controller */
6091 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
6092 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6094 { /* MCP67 Ethernet Controller */
6095 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
6096 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
6098 { /* MCP67 Ethernet Controller */
6099 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
6100 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
6102 { /* MCP67 Ethernet Controller */
6103 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
6104 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
6106 { /* MCP67 Ethernet Controller */
6107 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
6108 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
6110 { /* MCP73 Ethernet Controller */
6111 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28),
6112 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
6114 { /* MCP73 Ethernet Controller */
6115 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29),
6116 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
6118 { /* MCP73 Ethernet Controller */
6119 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30),
6120 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
6122 { /* MCP73 Ethernet Controller */
6123 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
6124 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
6126 { /* MCP77 Ethernet Controller */
6127 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
6128 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6130 { /* MCP77 Ethernet Controller */
6131 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
6132 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6134 { /* MCP77 Ethernet Controller */
6135 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
6136 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6138 { /* MCP77 Ethernet Controller */
6139 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
6140 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6142 { /* MCP79 Ethernet Controller */
6143 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
6144 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6146 { /* MCP79 Ethernet Controller */
6147 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
6148 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6150 { /* MCP79 Ethernet Controller */
6151 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
6152 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6154 { /* MCP79 Ethernet Controller */
6155 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
6156 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6158 {0,},
6161 static struct pci_driver driver = {
6162 .name = DRV_NAME,
6163 .id_table = pci_tbl,
6164 .probe = nv_probe,
6165 .remove = __devexit_p(nv_remove),
6166 .suspend = nv_suspend,
6167 .resume = nv_resume,
6168 .shutdown = nv_shutdown,
6171 static int __init init_nic(void)
6173 return pci_register_driver(&driver);
6176 static void __exit exit_nic(void)
6178 pci_unregister_driver(&driver);
6181 module_param(max_interrupt_work, int, 0);
6182 MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
6183 module_param(optimization_mode, int, 0);
6184 MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
6185 module_param(poll_interval, int, 0);
6186 MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
6187 module_param(msi, int, 0);
6188 MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
6189 module_param(msix, int, 0);
6190 MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
6191 module_param(dma_64bit, int, 0);
6192 MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
6193 module_param(phy_cross, int, 0);
6194 MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
6196 MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
6197 MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
6198 MODULE_LICENSE("GPL");
6200 MODULE_DEVICE_TABLE(pci, pci_tbl);
6202 module_init(init_nic);
6203 module_exit(exit_nic);