/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *		IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,2005,2006,2007,2008 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#define FORCEDETH_VERSION		"0.61"
#define DRV_NAME			"forcedeth"
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#if 0
#define dprintk			printk
#else
#define dprintk(x...)		do { } while (0)
#endif
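/* Note: dprintk compiles to a no-op in the default build above; flipping
 * the "#if 0" to "#if 1" routes the driver's debug output through printk,
 * making the dprintk(KERN_DEBUG ...) calls throughout this file live.
 */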
#define TX_WORK_PER_LOOP  64
#define RX_WORK_PER_LOOP  64
/*
 * Hardware access:
 */

#define DEV_NEED_TIMERIRQ	0x00001  /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER	0x00002  /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC	0x00004  /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA	0x00008  /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM	0x00010  /* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN		0x00020  /* device supports vlan tagging and stripping */
#define DEV_HAS_MSI		0x00040  /* device supports MSI */
#define DEV_HAS_MSI_X		0x00080  /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL	0x00100  /* device supports power savings */
#define DEV_HAS_STATISTICS_V1	0x00200  /* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2	0x00400  /* device supports hw statistics version 2 */
#define DEV_HAS_TEST_EXTENDED	0x00800  /* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT	0x01000  /* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR	0x02000  /* device supports correct mac address order */
#define DEV_HAS_COLLISION_FIX	0x04000  /* device supports tx collision fix */
#define DEV_HAS_PAUSEFRAME_TX_V1	0x08000  /* device supports tx pause frames version 1 */
#define DEV_HAS_PAUSEFRAME_TX_V2	0x10000  /* device supports tx pause frames version 2 */
#define DEV_HAS_PAUSEFRAME_TX_V3	0x20000  /* device supports tx pause frames version 3 */
#define DEV_NEED_TX_LIMIT	0x40000  /* device needs to limit tx */
#define DEV_HAS_GEAR_MODE	0x80000  /* device supports gear mode */
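/* These DEV_* feature bits are OR'd together into the driver_data field of
 * each PCI device table entry; the probe path then keys optional setup off
 * the bits, e.g. (DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC)
 * for a board with jumbo-frame support (illustrative combination, not a
 * specific board's actual flag set).
 */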
enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT	0x040
#define NVREG_IRQSTAT_MASK		0x81ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR		0x0001
#define NVREG_IRQ_RX			0x0002
#define NVREG_IRQ_RX_NOBUF		0x0004
#define NVREG_IRQ_TX_ERR		0x0008
#define NVREG_IRQ_TX_OK			0x0010
#define NVREG_IRQ_TIMER			0x0020
#define NVREG_IRQ_LINK			0x0040
#define NVREG_IRQ_RX_FORCED		0x0080
#define NVREG_IRQ_TX_FORCED		0x0100
#define NVREG_IRQ_RECOVER_ERROR		0x8000
#define NVREG_IRQMASK_THROUGHPUT	0x00df
#define NVREG_IRQMASK_CPU		0x0060
#define NVREG_IRQ_TX_ALL		(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL		(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER			(NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)

#define NVREG_IRQ_UNKNOWN	(~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
					NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
					NVREG_IRQ_TX_FORCED|NVREG_IRQ_RECOVER_ERROR))

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL		3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT	970 /* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU	13
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX	0x01
#define NVREG_MISC1_HD		0x02
#define NVREG_MISC1_FORCE	0x3b0f3c

	NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT	0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START	0x01
#define NVREG_XMITCTL_MGMT_ST	0x40000000
#define NVREG_XMITCTL_SYNC_MASK		0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY	0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT	0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK	0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE	0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK	0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ	0x0000f000
#define NVREG_XMITCTL_HOST_LOADED	0x00004000
#define NVREG_XMITCTL_TX_PATH_EN	0x01000000
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY	0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX	0x08
#define NVREG_PFF_ALWAYS	0x7F0000
#define NVREG_PFF_PROMISC	0x80
#define NVREG_PFF_MYADDR	0x20
#define NVREG_PFF_LOOPBACK	0x10

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY	0x601
#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START	0x01
#define NVREG_RCVCTL_RX_PATH_EN	0x01000000
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY	0x01

	NvRegSlotTime = 0x9c,
#define NVREG_SLOTTIME_LEGBF_ENABLED	0x80000000
#define NVREG_SLOTTIME_10_100_FULL	0x00007f00
#define NVREG_SLOTTIME_1000_FULL	0x0003ff00
#define NVREG_SLOTTIME_HALF		0x0000ff00
#define NVREG_SLOTTIME_DEFAULT		0x00007f00
#define NVREG_SLOTTIME_MASK		0x000000ff

	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT		0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100		0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000		0x14050f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10	0x16190f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100	0x16300f
#define NVREG_TX_DEFERRAL_MII_STRETCH		0x152000
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT	0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE	0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
#define NVREG_MCASTMASKA_NONE		0xffffffff
	NvRegMulticastMaskB = 0xBC,
#define NVREG_MCASTMASKB_NONE		0xffff

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII		0x10000000
	NvRegBackOffControl = 0xC4,
#define NVREG_BKOFFCTRL_DEFAULT			0x70000000
#define NVREG_BKOFFCTRL_SEED_MASK		0x000003ff
#define NVREG_BKOFFCTRL_SELECT			24
#define NVREG_BKOFFCTRL_GEAR			12

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
	NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV	0x00008000
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE 0x10000
#define NVREG_LINKSPEED_10	1000
#define NVREG_LINKSPEED_100	100
#define NVREG_LINKSPEED_1000	50
#define NVREG_LINKSPEED_MASK	(0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31	(1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT	0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT	0x1e08000
#define NVREG_TX_WM_DESC2_3_1000	0xfe08000
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK	0x0001
#define NVREG_TXRXCTL_BIT1	0x0002
#define NVREG_TXRXCTL_BIT2	0x0004
#define NVREG_TXRXCTL_IDLE	0x0008
#define NVREG_TXRXCTL_RESET	0x0010
#define NVREG_TXRXCTL_RXCHECK	0x0400
#define NVREG_TXRXCTL_DESC_1	0
#define NVREG_TXRXCTL_DESC_2	0x002100
#define NVREG_TXRXCTL_DESC_3	0xc02200
#define NVREG_TXRXCTL_VLANSTRIP 0x00040
#define NVREG_TXRXCTL_VLANINS	0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE	0x0fff0080
#define NVREG_TX_PAUSEFRAME_ENABLE_V1	0x01800010
#define NVREG_TX_PAUSEFRAME_ENABLE_V2	0x056003f0
#define NVREG_TX_PAUSEFRAME_ENABLE_V3	0x09f00880
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR		0x0001
#define NVREG_MIISTAT_LINKCHANGE	0x0008
#define NVREG_MIISTAT_MASK_RW		0x0007
#define NVREG_MIISTAT_MASK_ALL		0x000f
	NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE		0x0008

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START	0x02
#define NVREG_ADAPTCTL_LINKUP	0x04
#define NVREG_ADAPTCTL_PHYVALID	0x40000
#define NVREG_ADAPTCTL_RUNNING	0x100000
#define NVREG_ADAPTCTL_PHYSHIFT	24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8	(1<<8)
#define NVREG_MIIDELAY	5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE	0x08000
#define NVREG_MIICTL_WRITE	0x00400
#define NVREG_MIICTL_ADDRSHIFT	5
	NvRegMIIData = 0x194,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL		0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
#define NVREG_WAKEUPFLAGS_D3SHIFT	12
#define NVREG_WAKEUPFLAGS_D2SHIFT	8
#define NVREG_WAKEUPFLAGS_D1SHIFT	4
#define NVREG_WAKEUPFLAGS_D0SHIFT	0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
#define NVREG_WAKEUPFLAGS_ENABLE	0x1111

	NvRegPatternCRC = 0x204,
	NvRegPatternMask = 0x208,
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP	(1<<30)
#define NVREG_POWERCAP_D2SUPP	(1<<26)
#define NVREG_POWERCAP_D1SUPP	(1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP	0x8000
#define NVREG_POWERSTATE_VALID		0x0100
#define NVREG_POWERSTATE_MASK		0x0003
#define NVREG_POWERSTATE_D0		0x0000
#define NVREG_POWERSTATE_D1		0x0001
#define NVREG_POWERSTATE_D2		0x0002
#define NVREG_POWERSTATE_D3		0x0003

	NvRegTxCnt = 0x280,
	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxRunt = 0x2b0,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxDef = 0x2d4,
	NvRegTxFrame = 0x2d8,
	NvRegRxCnt = 0x2dc,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE	0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,

	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F11
#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
#define NVREG_POWERSTATE2_PHY_RESET		0x0004
};
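/* The enum values above are byte offsets into the NIC's memory-mapped
 * register window (BAR 0); every hardware access in this driver goes
 * through readl(base + NvRegXxx) or writel(val, base + NvRegXxx), where
 * base is the ioremap'ed mapping kept in np->base.
 */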
/* Big endian: should work, but is untested */
struct ring_desc {
	__le32 buf;
	__le32 flaglen;
};

struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

union ring_type {
	struct ring_desc* orig;
	struct ring_desc_ex* ex;
};
#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
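/* Each descriptor packs buffer length and status into one 32-bit flaglen
 * word: the low bits (LEN_MASK_x) hold the length, the high bits
 * (FLAG_MASK_x) hold the NV_TX / NV_RX flag bits defined below. For
 * example, a v1 tx descriptor reading 0x80000042 is NV_TX_VALID with a
 * 0x42-byte buffer.
 */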
#define NV_TX_LASTPACKET	(1<<16)
#define NV_TX_RETRYERROR	(1<<19)
#define NV_TX_RETRYCOUNT_MASK	(0xF<<20)
#define NV_TX_FORCED_INTERRUPT	(1<<24)
#define NV_TX_DEFERRED		(1<<26)
#define NV_TX_CARRIERLOST	(1<<27)
#define NV_TX_LATECOLLISION	(1<<28)
#define NV_TX_UNDERFLOW		(1<<29)
#define NV_TX_ERROR		(1<<30)
#define NV_TX_VALID		(1<<31)

#define NV_TX2_LASTPACKET	(1<<29)
#define NV_TX2_RETRYERROR	(1<<18)
#define NV_TX2_RETRYCOUNT_MASK	(0xF<<19)
#define NV_TX2_FORCED_INTERRUPT	(1<<30)
#define NV_TX2_DEFERRED		(1<<25)
#define NV_TX2_CARRIERLOST	(1<<26)
#define NV_TX2_LATECOLLISION	(1<<27)
#define NV_TX2_UNDERFLOW	(1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR		(1<<30)
#define NV_TX2_VALID		(1<<31)
#define NV_TX2_TSO		(1<<28)
#define NV_TX2_TSO_SHIFT	14
#define NV_TX2_TSO_MAX_SHIFT	14
#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3	(1<<27)
#define NV_TX2_CHECKSUM_L4	(1<<26)

#define NV_TX3_VLAN_TAG_PRESENT (1<<18)

#define NV_RX_DESCRIPTORVALID	(1<<16)
#define NV_RX_MISSEDFRAME	(1<<17)
#define NV_RX_SUBSTRACT1	(1<<18)
#define NV_RX_ERROR1		(1<<23)
#define NV_RX_ERROR2		(1<<24)
#define NV_RX_ERROR3		(1<<25)
#define NV_RX_ERROR4		(1<<26)
#define NV_RX_CRCERR		(1<<27)
#define NV_RX_OVERFLOW		(1<<28)
#define NV_RX_FRAMINGERR	(1<<29)
#define NV_RX_ERROR		(1<<30)
#define NV_RX_AVAIL		(1<<31)

#define NV_RX2_CHECKSUMMASK	(0x1C000000)
#define NV_RX2_CHECKSUM_IP	(0x10000000)
#define NV_RX2_CHECKSUM_IP_TCP	(0x14000000)
#define NV_RX2_CHECKSUM_IP_UDP	(0x18000000)
#define NV_RX2_DESCRIPTORVALID	(1<<29)
#define NV_RX2_SUBSTRACT1	(1<<25)
#define NV_RX2_ERROR1		(1<<18)
#define NV_RX2_ERROR2		(1<<19)
#define NV_RX2_ERROR3		(1<<20)
#define NV_RX2_ERROR4		(1<<21)
#define NV_RX2_CRCERR		(1<<22)
#define NV_RX2_OVERFLOW		(1<<23)
#define NV_RX2_FRAMINGERR	(1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR		(1<<30)
#define NV_RX2_AVAIL		(1<<31)

#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)
/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1	0x270
#define NV_PCI_REGSZ_VER2	0x2d4
#define NV_PCI_REGSZ_VER3	0x604
#define NV_PCI_REGSZ_MAX	0x604
/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY	4
#define NV_TXSTOP_DELAY1	10
#define NV_TXSTOP_DELAY1MAX	500000
#define NV_TXSTOP_DELAY2	100
#define NV_RXSTOP_DELAY1	10
#define NV_RXSTOP_DELAY1MAX	500000
#define NV_RXSTOP_DELAY2	100
#define NV_SETUP5_DELAY		5
#define NV_SETUP5_DELAYMAX	50000
#define NV_POWERUP_DELAY	5
#define NV_POWERUP_DELAYMAX	5000
#define NV_MIIBUSY_DELAY	50
#define NV_MIIPHY_DELAY		10
#define NV_MIIPHY_DELAYMAX	10000
#define NV_MAC_RESET_DELAY	64

#define NV_WAKEUPPATTERNS	5
#define NV_WAKEUPMASKENTRIES	4
/* General driver defaults */
#define NV_WATCHDOG_TIMEO	(5*HZ)

#define RX_RING_DEFAULT		128
#define TX_RING_DEFAULT		256
#define RX_RING_MIN		128
#define TX_RING_MIN		64
#define RING_MAX_DESC_VER_1	1024
#define RING_MAX_DESC_VER_2_3	16384

/* rx/tx mac addr + type + vlan + align + slack*/
#define NV_RX_HEADERS		(64)
/* even more slack. */
#define NV_RX_ALLOC_PAD		(64)

/* maximum mtu size */
#define NV_PKTLIMIT_1		ETH_DATA_LEN	/* hard limit not known */
#define NV_PKTLIMIT_2		9100	/* Actual limit according to NVidia: 9202 */

#define OOM_REFILL	(1+HZ/20)
#define POLL_WAIT	(1+HZ/100)
#define LINK_TIMEOUT	(3*HZ)
#define STATS_INTERVAL	(10*HZ)
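/* Timer arithmetic: OOM_REFILL is one jiffy more than HZ/20, i.e. roughly
 * 50 ms between rx-buffer refill retries after a failed allocation;
 * POLL_WAIT is roughly 10 ms, and STATS_INTERVAL polls the hardware
 * counters every 10 seconds. The +1 presumably guards against a zero
 * timeout on coarse HZ configurations.
 */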
/*
 * desc_ver values:
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1	1
#define DESC_VER_2	2
#define DESC_VER_3	3
/* PHY defines */
#define PHY_OUI_MARVELL		0x5043
#define PHY_OUI_CICADA		0x03f1
#define PHY_OUI_VITESSE		0x01c1
#define PHY_OUI_REALTEK		0x0732
#define PHY_OUI_REALTEK2	0x0020
#define PHYID1_OUI_MASK		0x03ff
#define PHYID1_OUI_SHFT		6
#define PHYID2_OUI_MASK		0xfc00
#define PHYID2_OUI_SHFT		10
#define PHYID2_MODEL_MASK	0x03f0
#define PHY_MODEL_REALTEK_8211	0x0110
#define PHY_REV_MASK		0x0001
#define PHY_REV_REALTEK_8211B	0x0000
#define PHY_REV_REALTEK_8211C	0x0001
#define PHY_MODEL_REALTEK_8201	0x0200
#define PHY_MODEL_MARVELL_E3016	0x0220
#define PHY_MARVELL_E3016_INITMASK	0x0300
#define PHY_CICADA_INIT1	0x0f000
#define PHY_CICADA_INIT2	0x0e00
#define PHY_CICADA_INIT3	0x01000
#define PHY_CICADA_INIT4	0x0200
#define PHY_CICADA_INIT5	0x0004
#define PHY_CICADA_INIT6	0x02000
#define PHY_VITESSE_INIT_REG1	0x1f
#define PHY_VITESSE_INIT_REG2	0x10
#define PHY_VITESSE_INIT_REG3	0x11
#define PHY_VITESSE_INIT_REG4	0x12
#define PHY_VITESSE_INIT_MSK1	0xc
#define PHY_VITESSE_INIT_MSK2	0x0180
#define PHY_VITESSE_INIT1	0x52b5
#define PHY_VITESSE_INIT2	0xaf8a
#define PHY_VITESSE_INIT3	0x8
#define PHY_VITESSE_INIT4	0x8f8a
#define PHY_VITESSE_INIT5	0xaf86
#define PHY_VITESSE_INIT6	0x8f86
#define PHY_VITESSE_INIT7	0xaf82
#define PHY_VITESSE_INIT8	0x0100
#define PHY_VITESSE_INIT9	0x8f82
#define PHY_VITESSE_INIT10	0x0
#define PHY_REALTEK_INIT_REG1	0x1f
#define PHY_REALTEK_INIT_REG2	0x19
#define PHY_REALTEK_INIT_REG3	0x13
#define PHY_REALTEK_INIT_REG4	0x14
#define PHY_REALTEK_INIT_REG5	0x18
#define PHY_REALTEK_INIT_REG6	0x11
#define PHY_REALTEK_INIT_REG7	0x01
#define PHY_REALTEK_INIT1	0x0000
#define PHY_REALTEK_INIT2	0x8e00
#define PHY_REALTEK_INIT3	0x0001
#define PHY_REALTEK_INIT4	0xad17
#define PHY_REALTEK_INIT5	0xfb54
#define PHY_REALTEK_INIT6	0xf5c7
#define PHY_REALTEK_INIT7	0x1000
#define PHY_REALTEK_INIT8	0x0003
#define PHY_REALTEK_INIT9	0x0008
#define PHY_REALTEK_INIT10	0x0005
#define PHY_REALTEK_INIT11	0x0200
#define PHY_REALTEK_INIT_MSK1	0x0003

#define PHY_GIGABIT	0x0100
#define PHY_TIMEOUT	0x1
#define PHY_ERROR	0x2

#define PHY_100		0x1
#define PHY_1000	0x2
#define PHY_HALF	0x100
#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE  0x0004
#define NV_PAUSEFRAME_TX_ENABLE  0x0008
#define NV_PAUSEFRAME_RX_REQ     0x0010
#define NV_PAUSEFRAME_TX_REQ     0x0020
#define NV_PAUSEFRAME_AUTONEG    0x0040
/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS	8
#define NV_MSI_X_VECTORS_MASK	0x000f
#define NV_MSI_CAPABLE		0x0010
#define NV_MSI_X_CAPABLE	0x0020
#define NV_MSI_ENABLED		0x0040
#define NV_MSI_X_ENABLED	0x0080

#define NV_MSI_X_VECTOR_ALL	0x0
#define NV_MSI_X_VECTOR_RX	0x0
#define NV_MSI_X_VECTOR_TX	0x1
#define NV_MSI_X_VECTOR_OTHER	0x2

#define NV_RESTART_TX		0x1
#define NV_RESTART_RX		0x2
#define NV_TX_LIMIT_COUNT	16
/* statistics */
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};
static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_bytes" },
	{ "tx_zero_rexmt" },
	{ "tx_one_rexmt" },
	{ "tx_many_rexmt" },
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "rx_frame_error" },
	{ "rx_extra_byte" },
	{ "rx_late_collision" },
	{ "rx_runt" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_crc_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_unicast" },
	{ "rx_multicast" },
	{ "rx_broadcast" },
	{ "rx_packets" },
	{ "rx_errors_total" },
	{ "tx_errors_total" },

	/* version 2 stats */
	{ "tx_deferral" },
	{ "tx_packets" },
	{ "rx_bytes" },
	{ "tx_pause" },
	{ "rx_pause" },
	{ "rx_drop_frame" }
};
struct nv_ethtool_stats {
	u64 tx_bytes;
	u64 tx_zero_rexmt;
	u64 tx_one_rexmt;
	u64 tx_many_rexmt;
	u64 tx_late_collision;
	u64 tx_fifo_errors;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 tx_retry_error;
	u64 rx_frame_error;
	u64 rx_extra_byte;
	u64 rx_late_collision;
	u64 rx_runt;
	u64 rx_frame_too_long;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_frame_align_error;
	u64 rx_length_error;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_packets;
	u64 rx_errors_total;
	u64 tx_errors_total;

	/* version 2 stats */
	u64 tx_deferral;
	u64 tx_packets;
	u64 rx_bytes;
	u64 tx_pause;
	u64 rx_pause;
	u64 rx_drop_frame;
};
#define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
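/* NV_DEV_STATISTICS_V2_COUNT is derived from the struct size, so it stays
 * correct if counters are added; version-1 hardware lacks the six
 * version-2 counters (tx_deferral through rx_drop_frame), hence the "- 6".
 */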
/* diagnostics */
#define NV_TEST_COUNT_BASE 3
#define NV_TEST_COUNT_EXTENDED 4

static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link      (online/offline)" },
	{ "register  (offline)       " },
	{ "interrupt (offline)       " },
	{ "loopback  (offline)       " }
};
struct register_test {
	__u32 reg;
	__u32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	{ 0,0 }
};
struct nv_skb_map {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int dma_len;
	struct ring_desc_ex *first_tx_desc;
	struct nv_skb_map *next_tx_ctx;
};
/*
 * SMP locking:
 * All hardware access under dev->priv->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *	by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *	needs dev->priv->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 */
/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	struct net_device *dev;
	struct napi_struct napi;

	/* General data:
	 * Locking: spin_lock(&np->lock); */
	struct nv_ethtool_stats estats;
	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int autoneg;
	int fixed_mode;
	int phyaddr;
	int wolenabled;
	unsigned int phy_oui;
	unsigned int phy_model;
	unsigned int phy_rev;
	u16 gigabit;
	int intr_test;
	int recover_error;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 orig_mac[2];
	u32 irqmask;
	u32 desc_ver;
	u32 txrxctl_bits;
	u32 vlanctl_bits;
	u32 driver_data;
	u32 device_id;
	u32 register_size;
	int rx_csum;
	u32 mac_in_use;

	void __iomem *base;

	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type get_rx, put_rx, first_rx, last_rx;
	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
	struct nv_skb_map *rx_skb;

	union ring_type rx_ring;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;
	u32 nic_poll_irq;
	int rx_ring_size;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	int need_linktimer;
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	union ring_type get_tx, put_tx, first_tx, last_tx;
	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
	struct nv_skb_map *tx_skb;

	union ring_type tx_ring;
	u32 tx_flags;
	int tx_ring_size;
	int tx_limit;
	u32 tx_pkts_in_progress;
	struct nv_skb_map *tx_change_owner;
	struct nv_skb_map *tx_end_flip;
	int tx_stop;

	/* vlan fields */
	struct vlan_group *vlangrp;

	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* flow control */
	u32 pause_flags;

	/* power saved state */
	u32 saved_config_space[NV_PCI_REGSZ_MAX/4];
};
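/* Ring bookkeeping convention used throughout: first_rx/last_rx (and the
 * tx equivalents) bound the descriptor ring, put_rx is the next slot to
 * hand to the hardware and get_rx the next slot to reap, with the _ctx
 * pointers tracking the parallel nv_skb_map arrays in lock-step.
 */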
/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 5;
/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU
};
static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * The value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;
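/* Worked example of the formula above: a 1 ms interval gives
 * (1000 * 100) / 1024 = 97 (the NVREG_POLL_DEFAULT mentioned at the top of
 * the file), and NVREG_POLL_DEFAULT_THROUGHPUT (970) therefore corresponds
 * to roughly 10 ms between backup tx-cleanup timer interrupts.
 */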
/*
 * MSI interrupts
 */
enum {
	NV_MSI_INT_DISABLED,
	NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;
/*
 * MSIX interrupts
 */
enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_DISABLED;
/*
 * DMA 64bit
 */
enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;
/*
 * Crossover Detection
 * Realtek 8201 phy + some OEM boards do not work properly.
 */
enum {
	NV_CROSSOVER_DETECTION_DISABLED,
	NV_CROSSOVER_DETECTION_ENABLED
};
static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;
static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}
static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}
static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}
static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}
static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}
static bool nv_optimized(struct fe_priv *np)
{
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		return false;
	return true;
}
static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
		     int delay, int delaymax, const char *msg)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0) {
			if (msg)
				printk(msg);
			return 1;
		}
	} while ((readl(base + offset) & mask) != target);
	return 0;
}
#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02
static inline u32 dma_low(dma_addr_t addr)
{
	return addr;
}

static inline u32 dma_high(dma_addr_t addr)
{
	return addr>>31>>1;	/* 0 if 32bit, shift down by 32 if 64bit */
}
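/* The double shift in dma_high is deliberate: when dma_addr_t is only 32
 * bits wide, "addr >> 32" would be undefined behaviour in C (shift count
 * equal to the type width), while ">>31>>1" safely yields 0 there and the
 * real upper 32 bits on 64-bit configurations.
 */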
static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (!nv_optimized(np)) {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		}
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
		}
	}
}
static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!nv_optimized(np)) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
	if (np->rx_skb)
		kfree(np->rx_skb);
	if (np->tx_skb)
		kfree(np->tx_skb);
}
static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}
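/* In other words: only when MSI-X is enabled with more than one vector do
 * rx, tx and link events arrive on separate interrupts; legacy INTx, MSI
 * and single-vector MSI-X all funnel through one handler.
 */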
static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}
static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}
/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}
static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}
#define MII_READ	(-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
				dev->name, value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
				dev->name, miireg, addr, retval);
	}

	return retval;
}
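/* Typical usage, as seen throughout phy_init below: a read passes MII_READ
 * as the value and returns the register contents (or -1 on timeout/error),
 * e.g. mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); a write passes the
 * value to store and returns nonzero on failure.
 */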
static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = BMCR_RESET | bmcr_setup;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
		return -1;
	}

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		msleep(10);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}
static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;

	/* phy errata for E3016 phy */
	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		reg &= ~PHY_MARVELL_E3016_INITMASK;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
			printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211C) {
			u32 powerstate = readl(base + NvRegPowerState2);

			/* need to perform hw phy reset */
			powerstate |= NVREG_POWERSTATE2_PHY_RESET;
			writel(powerstate, base + NvRegPowerState2);
			msleep(25);

			powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
			writel(powerstate, base + NvRegPowerState2);
			msleep(25);

			reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
			reg |= PHY_REALTEK_INIT9;
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
			if (!(reg & PHY_REALTEK_INIT11)) {
				reg |= PHY_REALTEK_INIT11;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
		if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_39) {
				phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
				phy_reserved |= PHY_REALTEK_INIT7;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
		}
	}

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	else
		np->gigabit = 0;

	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= BMCR_ANENABLE;

	if (np->phy_oui == PHY_OUI_REALTEK &&
	    np->phy_model == PHY_MODEL_REALTEK_8211 &&
	    np->phy_rev == PHY_REV_REALTEK_8211C) {
		/* start autoneg since we already performed hw reset above */
		mii_control |= BMCR_ANRESTART;
		if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
			printk(KERN_INFO "%s: phy init failed\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else {
		/* reset the phy
		 * (certain phys need bmcr to be setup with reset)
		 */
		if (phy_reset(dev, mii_control)) {
			printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* phy vendor specific configuration */
	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_CICADA_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_CICADA) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
		phy_reserved |= PHY_CICADA_INIT6;
		if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_VITESSE) {
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
		phy_reserved |= PHY_VITESSE_INIT8;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			/* reset could have cleared these out, set them back */
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
		if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_39) {
				phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
				phy_reserved |= PHY_REALTEK_INIT7;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
			if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
				phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
				phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
				phy_reserved |= PHY_REALTEK_INIT3;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
		}
	}

	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);

	/* restart auto negotiation */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
		return PHY_ERROR;
	}

	return 0;
}
static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
	/* Already running? Stop it. */
	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
		rx_ctrl &= ~NVREG_RCVCTL_START;
		writel(rx_ctrl, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	rx_ctrl |= NVREG_RCVCTL_START;
	if (np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
				dev->name, np->duplex, np->linkspeed);
	pci_push(base);
}
static void nv_stop_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
	if (!np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_START;
	else
		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
			NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");

	udelay(NV_RXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(0, base + NvRegLinkSpeed);
}
static void nv_start_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
	tx_ctrl |= NVREG_XMITCTL_START;
	if (np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	pci_push(base);
}
static void nv_stop_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
	if (!np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_START;
	else
		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
			NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");

	udelay(NV_TXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
		       base + NvRegTransmitPoll);
}
static void nv_start_rxtx(struct net_device *dev)
{
	nv_start_rx(dev);
	nv_start_tx(dev);
}

static void nv_stop_rxtx(struct net_device *dev)
{
	nv_stop_rx(dev);
	nv_stop_tx(dev);
}
static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}
static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 temp1, temp2, temp3;

	dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);

	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);

	/* save registers since they will be cleared on reset */
	temp1 = readl(base + NvRegMacAddrA);
	temp2 = readl(base + NvRegMacAddrB);
	temp3 = readl(base + NvRegTransmitPoll);

	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);

	/* restore saved registers */
	writel(temp1, base + NvRegMacAddrA);
	writel(temp2, base + NvRegMacAddrB);
	writel(temp3, base + NvRegTransmitPoll);

	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}
static void nv_get_hw_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->estats.tx_bytes += readl(base + NvRegTxCnt);
	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
	np->estats.rx_runt += readl(base + NvRegRxRunt);
	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
	np->estats.rx_packets =
		np->estats.rx_unicast +
		np->estats.rx_multicast +
		np->estats.rx_broadcast;
	np->estats.rx_errors_total =
		np->estats.rx_crc_errors +
		np->estats.rx_over_errors +
		np->estats.rx_frame_error +
		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
		np->estats.rx_late_collision +
		np->estats.rx_runt +
		np->estats.rx_frame_too_long;
	np->estats.tx_errors_total =
		np->estats.tx_late_collision +
		np->estats.tx_fifo_errors +
		np->estats.tx_carrier_errors +
		np->estats.tx_excess_deferral +
		np->estats.tx_retry_error;

	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
		np->estats.tx_deferral += readl(base + NvRegTxDef);
		np->estats.tx_packets += readl(base + NvRegTxFrame);
		np->estats.rx_bytes += readl(base + NvRegRxCnt);
		np->estats.tx_pause += readl(base + NvRegTxPause);
		np->estats.rx_pause += readl(base + NvRegRxPause);
		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
	}
}
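/* Note on the rx_errors_total sum above: the alignment-error counter
 * presumably also increments for the harmless "extra byte" events counted
 * separately, so rx_extra_byte is subtracted back out to avoid
 * double-counting those as real errors.
 */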
/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	/* If the nic supports hw counters then retrieve latest values */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) {
		nv_get_hw_stats(dev);

		/* copy to net_device stats */
		dev->stats.tx_bytes = np->estats.tx_bytes;
		dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
		dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
		dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
		dev->stats.rx_over_errors = np->estats.rx_over_errors;
		dev->stats.rx_errors = np->estats.rx_errors_total;
		dev->stats.tx_errors = np->estats.tx_errors_total;
	}

	return &dev->stats;
}
/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without Available descriptors
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc* less_rx;

	less_rx = np->get_rx.orig;
	if (less_rx-- == np->first_rx.orig)
		less_rx = np->last_rx.orig;

	while (np->put_rx.orig != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
			wmb();
			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
				np->put_rx.orig = np->first_rx.orig;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}
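/* The less_rx dance above (repeated in the _optimized variant below) stops
 * the fill loop one descriptor short of get_rx, deliberately leaving a
 * single empty slot so a completely full ring can be told apart from a
 * completely empty one when the put and get pointers would otherwise
 * coincide.
 */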
static int nv_alloc_rx_optimized(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc_ex* less_rx;

	less_rx = np->get_rx.ex;
	if (less_rx-- == np->first_rx.ex)
		less_rx = np->last_rx.ex;

	while (np->put_rx.ex != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
			np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
			wmb();
			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
				np->put_rx.ex = np->first_rx.ex;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}
/* If rx bufs are exhausted, this is called after 50ms to attempt a refresh */
#ifdef CONFIG_FORCEDETH_NAPI
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	/* Just reschedule NAPI rx processing */
	netif_rx_schedule(dev, &np->napi);
}
#else
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	int retcode;

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
	if (!nv_optimized(np))
		retcode = nv_alloc_rx(dev);
	else
		retcode = nv_alloc_rx_optimized(dev);
	if (retcode) {
		spin_lock_irq(&np->lock);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock_irq(&np->lock);
	}
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
}
#endif
static void nv_init_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;

	if (!nv_optimized(np))
		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
	else
		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
	np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];

	for (i = 0; i < np->rx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		np->rx_skb[i].skb = NULL;
		np->rx_skb[i].dma = 0;
	}
}
static void nv_init_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_tx = np->put_tx = np->first_tx = np->tx_ring;

	if (!nv_optimized(np))
		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
	else
		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
	np->tx_pkts_in_progress = 0;
	np->tx_change_owner = NULL;
	np->tx_end_flip = NULL;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		np->tx_skb[i].skb = NULL;
		np->tx_skb[i].dma = 0;
		np->tx_skb[i].dma_len = 0;
		np->tx_skb[i].first_tx_desc = NULL;
		np->tx_skb[i].next_tx_ctx = NULL;
	}
}
static int nv_init_ring(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	nv_init_tx(dev);
	nv_init_rx(dev);

	if (!nv_optimized(np))
		return nv_alloc_rx(dev);
	else
		return nv_alloc_rx_optimized(dev);
}
static int nv_release_txskb(struct net_device *dev, struct nv_skb_map *tx_skb)
{
	struct fe_priv *np = netdev_priv(dev);

	if (tx_skb->dma) {
		pci_unmap_page(np->pci_dev, tx_skb->dma,
			       tx_skb->dma_len,
			       PCI_DMA_TODEVICE);
		tx_skb->dma = 0;
	}
	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
		return 1;
	} else {
		return 0;
	}
}
static void nv_drain_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int i;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		if (nv_release_txskb(dev, &np->tx_skb[i]))
			dev->stats.tx_dropped++;
		np->tx_skb[i].dma = 0;
		np->tx_skb[i].dma_len = 0;
		np->tx_skb[i].first_tx_desc = NULL;
		np->tx_skb[i].next_tx_ctx = NULL;
	}
	np->tx_pkts_in_progress = 0;
	np->tx_change_owner = NULL;
	np->tx_end_flip = NULL;
}
static void nv_drain_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	for (i = 0; i < np->rx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		if (np->rx_skb[i].skb) {
			pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
					 (skb_end_pointer(np->rx_skb[i].skb) -
					  np->rx_skb[i].skb->data),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skb[i].skb);
			np->rx_skb[i].skb = NULL;
		}
	}
}
static void nv_drain_rxtx(struct net_device *dev)
{
	nv_drain_tx(dev);
	nv_drain_rx(dev);
}
static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
{
	return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
}
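/*
 * Worked example for nv_get_empty_tx_slots (illustrative numbers, not from
 * the original source): with tx_ring_size = 8 and put_tx_ctx three entries
 * ahead of get_tx_ctx, (8 + 3) % 8 = 3 slots are in use, so 8 - 3 = 5 are
 * empty. Adding tx_ring_size before the modulo keeps the pointer difference
 * non-negative when put_tx_ctx has wrapped around behind get_tx_ctx; when
 * the two pointers are equal, the ring is treated as completely empty.
 */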
static void nv_legacybackoff_reseed(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	u32 low;
	int tx_status = 0;

	reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK;
	get_random_bytes(&low, sizeof(low));
	reg |= low & NVREG_SLOTTIME_MASK;

	/* Need to stop tx before change takes effect.
	 * Caller has already gained np->lock.
	 */
	tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START;
	if (tx_status)
		nv_stop_tx(dev);
	nv_stop_rx(dev);
	writel(reg, base + NvRegSlotTime);
	if (tx_status)
		nv_start_tx(dev);
	nv_start_rx(dev);
}
/* Gear Backoff Seeds */
#define BACKOFF_SEEDSET_ROWS	8
#define BACKOFF_SEEDSET_LFSRS	15

/* Known Good seed sets */
static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
	{266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
	{266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
	{366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84},
	{466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184}};

static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}};
static void nv_gear_backoff_reseed(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed;
	u32 temp, seedset, combinedSeed;
	int i;

	/* Setup seed for free running LFSR */
	/* We are going to read the time stamp counter 3 times
	   and swizzle bits around to increase randomness */
	get_random_bytes(&miniseed1, sizeof(miniseed1));
	miniseed1 &= 0x0fff;
	if (miniseed1 == 0)
		miniseed1 = 0xabc;

	get_random_bytes(&miniseed2, sizeof(miniseed2));
	miniseed2 &= 0x0fff;
	if (miniseed2 == 0)
		miniseed2 = 0xabc;
	miniseed2_reversed =
		((miniseed2 & 0xF00) >> 8) |
		 (miniseed2 & 0x0F0) |
		 ((miniseed2 & 0x00F) << 8);

	get_random_bytes(&miniseed3, sizeof(miniseed3));
	miniseed3 &= 0x0fff;
	if (miniseed3 == 0)
		miniseed3 = 0xabc;
	miniseed3_reversed =
		((miniseed3 & 0xF00) >> 8) |
		 (miniseed3 & 0x0F0) |
		 ((miniseed3 & 0x00F) << 8);

	combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) |
		       (miniseed2 ^ miniseed3_reversed);

	/* Seeds can not be zero */
	if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0)
		combinedSeed |= 0x08;
	if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0)
		combinedSeed |= 0x8000;

	/* No need to disable tx here */
	temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
	temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
	temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
	writel(temp, base + NvRegBackOffControl);

	/* Setup seeds for all gear LFSRs. */
	get_random_bytes(&seedset, sizeof(seedset));
	seedset = seedset % BACKOFF_SEEDSET_ROWS;
	for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) {
		temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
		temp |= main_seedset[seedset][i-1] & 0x3ff;
		temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
		writel(temp, base + NvRegBackOffControl);
	}
}
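/*
 * Example of the miniseed swizzle above (illustrative): for a 12-bit value
 * 0xABC the reversal ((v & 0xF00) >> 8) | (v & 0x0F0) | ((v & 0x00F) << 8)
 * swaps the top and bottom nibbles and keeps the middle one, giving 0xCBA.
 * combinedSeed then mixes the three values: miniseed1 XOR the reversed
 * miniseed2 fills bits 12..23, miniseed2 XOR the reversed miniseed3 fills
 * bits 0..11.
 */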
/*
 * nv_start_xmit: dev->hard_start_xmit function
 * Called with netif_tx_lock held.
 */
static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb->len - skb->data_len;
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 empty_slots;
	struct ring_desc *put_tx;
	struct ring_desc *start_tx;
	struct ring_desc *prev_tx;
	struct nv_skb_map *prev_tx_ctx;
	unsigned long flags;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	empty_slots = nv_get_empty_tx_slots(np);
	if (unlikely(empty_slots <= entries)) {
		spin_lock_irqsave(&np->lock, flags);
		netif_stop_queue(dev);
		np->tx_stop = 1;
		spin_unlock_irqrestore(&np->lock, flags);
		return NETDEV_TX_BUSY;
	}

	start_tx = put_tx = np->put_tx.orig;

	/* setup the header buffer */
	do {
		prev_tx = put_tx;
		prev_tx_ctx = np->put_tx_ctx;
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						     PCI_DMA_TODEVICE);
		np->put_tx_ctx->dma_len = bcnt;
		put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

		tx_flags = np->tx_flags;
		offset += bcnt;
		size -= bcnt;
		if (unlikely(put_tx++ == np->last_tx.orig))
			put_tx = np->first_tx.orig;
		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
			np->put_tx_ctx = np->first_tx_ctx;
	} while (size);

	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			prev_tx = put_tx;
			prev_tx_ctx = np->put_tx_ctx;
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset + offset, bcnt,
							   PCI_DMA_TODEVICE);
			np->put_tx_ctx->dma_len = bcnt;
			put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

			offset += bcnt;
			size -= bcnt;
			if (unlikely(put_tx++ == np->last_tx.orig))
				put_tx = np->first_tx.orig;
			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
				np->put_tx_ctx = np->first_tx_ctx;
		} while (size);
	}

	/* set last fragment flag */
	prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);

	/* save skb in this slot's context area */
	prev_tx_ctx->skb = skb;

	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
	else
		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

	spin_lock_irqsave(&np->lock, flags);

	/* set tx flags */
	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
	np->put_tx.orig = put_tx;

	spin_unlock_irqrestore(&np->lock, flags);

	dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
		dev->name, entries, tx_flags_extra);
	{
		int j;
		for (j = 0; j < 64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
		}
		dprintk("\n");
	}

	dev->trans_start = jiffies;
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	return NETDEV_TX_OK;
}
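/*
 * Note on the 'entries' computation in both xmit paths (a sketch, assuming
 * NV_TX2_TSO_MAX_SIZE equals (1 << NV_TX2_TSO_MAX_SHIFT)): it is a ceiling
 * division of the buffer length by the maximum per-descriptor size. For
 * example, with a 16 KiB per-descriptor limit a 20000 byte linear area
 * yields 20000 >> 14 = 1 full chunk plus a nonzero remainder, i.e. two
 * descriptors.
 */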
static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra;
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb->len - skb->data_len;
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 empty_slots;
	struct ring_desc_ex *put_tx;
	struct ring_desc_ex *start_tx;
	struct ring_desc_ex *prev_tx;
	struct nv_skb_map *prev_tx_ctx;
	struct nv_skb_map *start_tx_ctx;
	unsigned long flags;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	empty_slots = nv_get_empty_tx_slots(np);
	if (unlikely(empty_slots <= entries)) {
		spin_lock_irqsave(&np->lock, flags);
		netif_stop_queue(dev);
		np->tx_stop = 1;
		spin_unlock_irqrestore(&np->lock, flags);
		return NETDEV_TX_BUSY;
	}

	start_tx = put_tx = np->put_tx.ex;
	start_tx_ctx = np->put_tx_ctx;

	/* setup the header buffer */
	do {
		prev_tx = put_tx;
		prev_tx_ctx = np->put_tx_ctx;
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						     PCI_DMA_TODEVICE);
		np->put_tx_ctx->dma_len = bcnt;
		put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
		put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

		tx_flags = NV_TX2_VALID;
		offset += bcnt;
		size -= bcnt;
		if (unlikely(put_tx++ == np->last_tx.ex))
			put_tx = np->first_tx.ex;
		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
			np->put_tx_ctx = np->first_tx_ctx;
	} while (size);

	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			prev_tx = put_tx;
			prev_tx_ctx = np->put_tx_ctx;
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset + offset, bcnt,
							   PCI_DMA_TODEVICE);
			np->put_tx_ctx->dma_len = bcnt;
			put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
			put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

			offset += bcnt;
			size -= bcnt;
			if (unlikely(put_tx++ == np->last_tx.ex))
				put_tx = np->first_tx.ex;
			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
				np->put_tx_ctx = np->first_tx_ctx;
		} while (size);
	}

	/* set last fragment flag */
	prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);

	/* save skb in this slot's context area */
	prev_tx_ctx->skb = skb;

	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
	else
		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

	/* vlan tag */
	if (likely(!np->vlangrp)) {
		start_tx->txvlan = 0;
	} else {
		if (vlan_tx_tag_present(skb))
			start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb));
		else
			start_tx->txvlan = 0;
	}

	spin_lock_irqsave(&np->lock, flags);

	if (np->tx_limit) {
		/* Limit the number of outstanding tx. Setup all fragments, but
		 * do not set the VALID bit on the first descriptor. Save a pointer
		 * to that descriptor and also for next skb_map element.
		 */

		if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) {
			if (!np->tx_change_owner)
				np->tx_change_owner = start_tx_ctx;

			/* remove VALID bit */
			tx_flags &= ~NV_TX2_VALID;
			start_tx_ctx->first_tx_desc = start_tx;
			start_tx_ctx->next_tx_ctx = np->put_tx_ctx;
			np->tx_end_flip = np->put_tx_ctx;
		} else {
			np->tx_pkts_in_progress++;
		}
	}

	/* set tx flags */
	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
	np->put_tx.ex = put_tx;

	spin_unlock_irqrestore(&np->lock, flags);

	dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
		dev->name, entries, tx_flags_extra);
	{
		int j;
		for (j = 0; j < 64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
		}
		dprintk("\n");
	}

	dev->trans_start = jiffies;
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	return NETDEV_TX_OK;
}
static inline void nv_tx_flip_ownership(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	np->tx_pkts_in_progress--;
	if (np->tx_change_owner) {
		np->tx_change_owner->first_tx_desc->flaglen |=
			cpu_to_le32(NV_TX2_VALID);
		np->tx_pkts_in_progress++;

		np->tx_change_owner = np->tx_change_owner->next_tx_ctx;
		if (np->tx_change_owner == np->tx_end_flip)
			np->tx_change_owner = NULL;

		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	}
}
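/*
 * How the tx limit scheme plays out (descriptive summary, not original
 * source text): once NV_TX_LIMIT_COUNT packets are outstanding,
 * nv_start_xmit_optimized queues further packets with the VALID bit cleared
 * on their first descriptor and chains them via tx_change_owner and
 * next_tx_ctx. Each completion then calls nv_tx_flip_ownership, which hands
 * the oldest deferred packet to the hardware by setting VALID and kicking
 * the transmitter, so at most NV_TX_LIMIT_COUNT packets are ever in flight.
 */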
/*
 * nv_tx_done: check for completed packets, release the skbs.
 *
 * Caller must own np->lock.
 */
static void nv_tx_done(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	struct ring_desc *orig_get_tx = np->get_tx.orig;

	while ((np->get_tx.orig != np->put_tx.orig) &&
	       !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID)) {

		dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
			dev->name, flags);

		pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
			       np->get_tx_ctx->dma_len,
			       PCI_DMA_TODEVICE);
		np->get_tx_ctx->dma = 0;

		if (np->desc_ver == DESC_VER_1) {
			if (flags & NV_TX_LASTPACKET) {
				if (flags & NV_TX_ERROR) {
					if (flags & NV_TX_UNDERFLOW)
						dev->stats.tx_fifo_errors++;
					if (flags & NV_TX_CARRIERLOST)
						dev->stats.tx_carrier_errors++;
					if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
						nv_legacybackoff_reseed(dev);
					dev->stats.tx_errors++;
				} else {
					dev->stats.tx_packets++;
					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
				}
				dev_kfree_skb_any(np->get_tx_ctx->skb);
				np->get_tx_ctx->skb = NULL;
			}
		} else {
			if (flags & NV_TX2_LASTPACKET) {
				if (flags & NV_TX2_ERROR) {
					if (flags & NV_TX2_UNDERFLOW)
						dev->stats.tx_fifo_errors++;
					if (flags & NV_TX2_CARRIERLOST)
						dev->stats.tx_carrier_errors++;
					if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
						nv_legacybackoff_reseed(dev);
					dev->stats.tx_errors++;
				} else {
					dev->stats.tx_packets++;
					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
				}
				dev_kfree_skb_any(np->get_tx_ctx->skb);
				np->get_tx_ctx->skb = NULL;
			}
		}
		if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
			np->get_tx.orig = np->first_tx.orig;
		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
			np->get_tx_ctx = np->first_tx_ctx;
	}
	if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
		np->tx_stop = 0;
		netif_wake_queue(dev);
	}
}
static void nv_tx_done_optimized(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	struct ring_desc_ex *orig_get_tx = np->get_tx.ex;

	while ((np->get_tx.ex != np->put_tx.ex) &&
	       !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) &&
	       (limit-- > 0)) {

		dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
			dev->name, flags);

		pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
			       np->get_tx_ctx->dma_len,
			       PCI_DMA_TODEVICE);
		np->get_tx_ctx->dma = 0;

		if (flags & NV_TX2_LASTPACKET) {
			if (!(flags & NV_TX2_ERROR))
				dev->stats.tx_packets++;
			else {
				if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
					if (np->driver_data & DEV_HAS_GEAR_MODE)
						nv_gear_backoff_reseed(dev);
					else
						nv_legacybackoff_reseed(dev);
				}
			}

			dev_kfree_skb_any(np->get_tx_ctx->skb);
			np->get_tx_ctx->skb = NULL;

			if (np->tx_limit)
				nv_tx_flip_ownership(dev);
		}
		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
			np->get_tx.ex = np->first_tx.ex;
		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
			np->get_tx_ctx = np->first_tx_ctx;
	}
	if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
		np->tx_stop = 0;
		netif_wake_queue(dev);
	}
}
/*
 * nv_tx_timeout: dev->tx_timeout function
 * Called with netif_tx_lock held.
 */
static void nv_tx_timeout(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 status;

	if (np->msi_flags & NV_MSI_X_ENABLED)
		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
	else
		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;

	printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);

	{
		int i;

		printk(KERN_INFO "%s: Ring at %lx\n",
		       dev->name, (unsigned long)np->ring_addr);
		printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
		for (i = 0; i <= np->register_size; i += 32) {
			printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
			       i,
			       readl(base + i + 0), readl(base + i + 4),
			       readl(base + i + 8), readl(base + i + 12),
			       readl(base + i + 16), readl(base + i + 20),
			       readl(base + i + 24), readl(base + i + 28));
		}
		printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
		for (i = 0; i < np->tx_ring_size; i += 4) {
			if (!nv_optimized(np)) {
				printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.orig[i].buf),
				       le32_to_cpu(np->tx_ring.orig[i].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+1].buf),
				       le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+2].buf),
				       le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+3].buf),
				       le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
			} else {
				printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.ex[i].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i].buflow),
				       le32_to_cpu(np->tx_ring.ex[i].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+1].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+2].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+3].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
			}
		}
	}

	spin_lock_irq(&np->lock);

	/* 1) stop tx engine */
	nv_stop_tx(dev);

	/* 2) check that the packets were not sent already: */
	if (!nv_optimized(np))
		nv_tx_done(dev);
	else
		nv_tx_done_optimized(dev, np->tx_ring_size);

	/* 3) if there are dead entries: clear everything */
	if (np->get_tx_ctx != np->put_tx_ctx) {
		printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
		nv_drain_tx(dev);
		nv_init_tx(dev);
		setup_hw_rings(dev, NV_SETUP_TX_RING);
	}

	netif_wake_queue(dev);

	/* 4) restart tx engine */
	nv_start_tx(dev);
	spin_unlock_irq(&np->lock);
}
/*
 * Called when the nic notices a mismatch between the actual data len on the
 * wire and the len indicated in the 802 header
 */
static int nv_getlen(struct net_device *dev, void *packet, int datalen)
{
	int hdrlen;	/* length of the 802 header */
	int protolen;	/* length as stored in the proto field */

	/* 1) calculate len according to header */
	if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
		protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);
		hdrlen = VLAN_HLEN;
	} else {
		protolen = ntohs(((struct ethhdr *)packet)->h_proto);
		hdrlen = ETH_HLEN;
	}
	dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
		dev->name, datalen, protolen, hdrlen);
	if (protolen > ETH_DATA_LEN)
		return datalen; /* Value in proto field not a len, no checks possible */

	protolen += hdrlen;
	/* consistency checks: */
	if (datalen > ETH_ZLEN) {
		if (datalen >= protolen) {
			/* more data on wire than in 802 header, trim of
			 * additional data.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
				dev->name, protolen);
			return protolen;
		} else {
			/* less data on wire than mentioned in header.
			 * Discard the packet.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
				dev->name);
			return -1;
		}
	} else {
		/* short packet. Accept only if 802 values are also short */
		if (protolen > ETH_ZLEN) {
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
				dev->name);
			return -1;
		}
		dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
			dev->name, datalen);
		return datalen;
	}
}
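/*
 * Worked examples for nv_getlen (illustrative numbers): a 100 byte frame
 * whose proto field claims 60 bytes of payload (74 with the 14 byte 802.3
 * header) is trimmed to 74 bytes; a 70 byte frame whose header claims 90
 * bytes of payload is discarded as a truncated long packet; a frame shorter
 * than ETH_ZLEN is only accepted if the header-derived length also fits
 * within ETH_ZLEN.
 */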
static int nv_rx_process(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	int rx_work = 0;
	struct sk_buff *skb;
	int len;

	while ((np->get_rx.orig != np->put_rx.orig) &&
	       !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
	       (rx_work < limit)) {

		dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
			dev->name, flags);

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
				 np->get_rx_ctx->dma_len,
				 PCI_DMA_FROMDEVICE);
		skb = np->get_rx_ctx->skb;
		np->get_rx_ctx->skb = NULL;

		{
			int j;
			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).", flags);
			for (j = 0; j < 64; j++) {
				if ((j%16) == 0)
					dprintk("\n%03x:", j);
				dprintk(" %02x", ((unsigned char*)skb->data)[j]);
			}
			dprintk("\n");
		}
		/* look at what we actually got: */
		if (np->desc_ver == DESC_VER_1) {
			if (likely(flags & NV_RX_DESCRIPTORVALID)) {
				len = flags & LEN_MASK_V1;
				if (unlikely(flags & NV_RX_ERROR)) {
					if (flags & NV_RX_ERROR4) {
						len = nv_getlen(dev, skb->data, len);
						if (len < 0) {
							dev->stats.rx_errors++;
							dev_kfree_skb(skb);
							goto next_pkt;
						}
					}
					/* framing errors are soft errors */
					else if (flags & NV_RX_FRAMINGERR) {
						if (flags & NV_RX_SUBSTRACT1)
							len--;
					}
					/* the rest are hard errors */
					else {
						if (flags & NV_RX_MISSEDFRAME)
							dev->stats.rx_missed_errors++;
						if (flags & NV_RX_CRCERR)
							dev->stats.rx_crc_errors++;
						if (flags & NV_RX_OVERFLOW)
							dev->stats.rx_over_errors++;
						dev->stats.rx_errors++;
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
			} else {
				dev_kfree_skb(skb);
				goto next_pkt;
			}
		} else {
			if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
				len = flags & LEN_MASK_V2;
				if (unlikely(flags & NV_RX2_ERROR)) {
					if (flags & NV_RX2_ERROR4) {
						len = nv_getlen(dev, skb->data, len);
						if (len < 0) {
							dev->stats.rx_errors++;
							dev_kfree_skb(skb);
							goto next_pkt;
						}
					}
					/* framing errors are soft errors */
					else if (flags & NV_RX2_FRAMINGERR) {
						if (flags & NV_RX2_SUBSTRACT1)
							len--;
					}
					/* the rest are hard errors */
					else {
						if (flags & NV_RX2_CRCERR)
							dev->stats.rx_crc_errors++;
						if (flags & NV_RX2_OVERFLOW)
							dev->stats.rx_over_errors++;
						dev->stats.rx_errors++;
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
				if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
				    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /*ip and udp */
					skb->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				dev_kfree_skb(skb);
				goto next_pkt;
			}
		}
		/* got a valid packet - forward it to the network core */
		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
			dev->name, len, skb->protocol);
#ifdef CONFIG_FORCEDETH_NAPI
		netif_receive_skb(skb);
#else
		netif_rx(skb);
#endif
		dev->last_rx = jiffies;
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
next_pkt:
		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
			np->get_rx.orig = np->first_rx.orig;
		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
			np->get_rx_ctx = np->first_rx_ctx;

		rx_work++;
	}
	return rx_work;
}
static int nv_rx_process_optimized(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	u32 vlanflags = 0;
	int rx_work = 0;
	struct sk_buff *skb;
	int len;

	while ((np->get_rx.ex != np->put_rx.ex) &&
	       !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
	       (rx_work < limit)) {

		dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
			dev->name, flags);

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
				 np->get_rx_ctx->dma_len,
				 PCI_DMA_FROMDEVICE);
		skb = np->get_rx_ctx->skb;
		np->get_rx_ctx->skb = NULL;

		{
			int j;
			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).", flags);
			for (j = 0; j < 64; j++) {
				if ((j%16) == 0)
					dprintk("\n%03x:", j);
				dprintk(" %02x", ((unsigned char*)skb->data)[j]);
			}
			dprintk("\n");
		}
		/* look at what we actually got: */
		if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
			len = flags & LEN_MASK_V2;
			if (unlikely(flags & NV_RX2_ERROR)) {
				if (flags & NV_RX2_ERROR4) {
					len = nv_getlen(dev, skb->data, len);
					if (len < 0) {
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
				/* framing errors are soft errors */
				else if (flags & NV_RX2_FRAMINGERR) {
					if (flags & NV_RX2_SUBSTRACT1)
						len--;
				}
				/* the rest are hard errors */
				else {
					dev_kfree_skb(skb);
					goto next_pkt;
				}
			}

			if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
			    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /*ip and udp */
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			/* got a valid packet - forward it to the network core */
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, dev);
			prefetch(skb->data);

			dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n",
				dev->name, len, skb->protocol);

			if (likely(!np->vlangrp)) {
#ifdef CONFIG_FORCEDETH_NAPI
				netif_receive_skb(skb);
#else
				netif_rx(skb);
#endif
			} else {
				vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
				if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
#ifdef CONFIG_FORCEDETH_NAPI
					vlan_hwaccel_receive_skb(skb, np->vlangrp,
								 vlanflags & NV_RX3_VLAN_TAG_MASK);
#else
					vlan_hwaccel_rx(skb, np->vlangrp,
							vlanflags & NV_RX3_VLAN_TAG_MASK);
#endif
				} else {
#ifdef CONFIG_FORCEDETH_NAPI
					netif_receive_skb(skb);
#else
					netif_rx(skb);
#endif
				}
			}

			dev->last_rx = jiffies;
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += len;
		} else {
			dev_kfree_skb(skb);
		}
next_pkt:
		if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
			np->get_rx.ex = np->first_rx.ex;
		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
			np->get_rx_ctx = np->first_rx_ctx;

		rx_work++;
	}
	return rx_work;
}
static void set_bufsize(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	if (dev->mtu <= ETH_DATA_LEN)
		np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
	else
		np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
}
/*
 * nv_change_mtu: dev->change_mtu function
 * Called with dev_base_lock held for read.
 */
static int nv_change_mtu(struct net_device *dev, int new_mtu)
{
	struct fe_priv *np = netdev_priv(dev);
	int old_mtu;

	if (new_mtu < 64 || new_mtu > np->pkt_limit)
		return -EINVAL;

	old_mtu = dev->mtu;
	dev->mtu = new_mtu;

	/* return early if the buffer sizes will not change */
	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
		return 0;
	if (old_mtu == new_mtu)
		return 0;

	/* synchronized against open : rtnl_lock() held by caller */
	if (netif_running(dev)) {
		u8 __iomem *base = get_hwbase(dev);
		/*
		 * It seems that the nic preloads valid ring entries into an
		 * internal buffer. The procedure for flushing everything is
		 * guessed, there is probably a simpler approach.
		 * Changing the MTU is a rare event, it shouldn't matter.
		 */
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rxtx(dev);
		nv_txrx_reset(dev);
		/* drain rx queue */
		nv_drain_rxtx(dev);
		/* reinit driver view of the rx queue */
		set_bufsize(dev);
		if (nv_init_ring(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}
		/* reinit nic view of the rx queue */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
		       base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);

		/* restart rx engine */
		nv_start_rxtx(dev);
		spin_unlock(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
		nv_enable_irq(dev);
	}
	return 0;
}
static void nv_copy_mac_to_hw(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 mac[2];

	mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
		 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
	mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);

	writel(mac[0], base + NvRegMacAddrA);
	writel(mac[1], base + NvRegMacAddrB);
}
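/*
 * Register layout example (illustrative): for the address 00:11:22:33:44:55
 * the bytes are packed lowest-byte-first into the two registers, so
 * mac[0] = 0x33221100 goes to NvRegMacAddrA and mac[1] = 0x00005544 goes to
 * NvRegMacAddrB.
 */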
/*
 * nv_set_mac_address: dev->set_mac_address function
 * Called with rtnl_lock() held.
 */
static int nv_set_mac_address(struct net_device *dev, void *addr)
{
	struct fe_priv *np = netdev_priv(dev);
	struct sockaddr *macaddr = (struct sockaddr *)addr;

	if (!is_valid_ether_addr(macaddr->sa_data))
		return -EADDRNOTAVAIL;

	/* synchronized against open : rtnl_lock() held by caller */
	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);

	if (netif_running(dev)) {
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock_irq(&np->lock);

		/* stop rx engine */
		nv_stop_rx(dev);

		/* set mac address */
		nv_copy_mac_to_hw(dev);

		/* restart rx engine */
		nv_start_rx(dev);
		spin_unlock_irq(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
	} else {
		nv_copy_mac_to_hw(dev);
	}
	return 0;
}
/*
 * nv_set_multicast: dev->set_multicast function
 * Called with netif_tx_lock held.
 */
static void nv_set_multicast(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 addr[2];
	u32 mask[2];
	u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;

	memset(addr, 0, sizeof(addr));
	memset(mask, 0, sizeof(mask));

	if (dev->flags & IFF_PROMISC) {
		pff |= NVREG_PFF_PROMISC;
	} else {
		pff |= NVREG_PFF_MYADDR;

		if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
			u32 alwaysOff[2];
			u32 alwaysOn[2];

			alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
			if (dev->flags & IFF_ALLMULTI) {
				alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
			} else {
				struct dev_mc_list *walk;

				walk = dev->mc_list;
				while (walk != NULL) {
					u32 a, b;
					a = le32_to_cpu(*(__le32 *) walk->dmi_addr);
					b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4]));
					alwaysOn[0] &= a;
					alwaysOff[0] &= ~a;
					alwaysOn[1] &= b;
					alwaysOff[1] &= ~b;
					walk = walk->next;
				}
			}
			addr[0] = alwaysOn[0];
			addr[1] = alwaysOn[1];
			mask[0] = alwaysOn[0] | alwaysOff[0];
			mask[1] = alwaysOn[1] | alwaysOff[1];
		} else {
			mask[0] = NVREG_MCASTMASKA_NONE;
			mask[1] = NVREG_MCASTMASKB_NONE;
		}
	}
	addr[0] |= NVREG_MCASTADDRA_FORCE;
	pff |= NVREG_PFF_ALWAYS;
	spin_lock_irq(&np->lock);
	nv_stop_rx(dev);
	writel(addr[0], base + NvRegMulticastAddrA);
	writel(addr[1], base + NvRegMulticastAddrB);
	writel(mask[0], base + NvRegMulticastMaskA);
	writel(mask[1], base + NvRegMulticastMaskB);
	writel(pff, base + NvRegPacketFilterFlags);
	dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
		dev->name);
	nv_start_rx(dev);
	spin_unlock_irq(&np->lock);
}
static void nv_update_pause(struct net_device *dev, u32 pause_flags)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);

	if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
		u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
		if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
			writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		} else {
			writel(pff, base + NvRegPacketFilterFlags);
		}
	}
	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
		u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
		if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
			u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)
				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
			writel(pause_enable, base + NvRegTxPauseFrame);
			writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
		} else {
			writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
			writel(regmisc, base + NvRegMisc1);
		}
	}
}
/*
 * nv_update_linkspeed: Setup the MAC according to the link partner
 * @dev: Network device to be configured
 *
 * The function queries the PHY and checks if there is a link partner.
 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
 * set to 10 MBit HD.
 *
 * The function returns 0 if there is no link partner and 1 if there is
 * a good link partner.
 */
static int nv_update_linkspeed(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int adv = 0;
	int lpa = 0;
	int adv_lpa, adv_pause, lpa_pause;
	int newls = np->linkspeed;
	int newdup = np->duplex;
	int mii_status;
	int retval = 0;
	u32 control_1000, status_1000, phyreg, pause_flags, txreg;
	u32 txrxFlags = 0;
	u32 phy_exp;

	/* BMSR_LSTATUS is latched, read it twice:
	 * we want the current value.
	 */
	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

	if (!(mii_status & BMSR_LSTATUS)) {
		dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
			dev->name);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		goto set_speed;
	}

	if (np->autoneg == 0) {
		dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
			dev->name, np->fixed_mode);
		if (np->fixed_mode & LPA_100FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 1;
		} else if (np->fixed_mode & LPA_100HALF) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 0;
		} else if (np->fixed_mode & LPA_10FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 1;
		} else {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 0;
		}
		retval = 1;
		goto set_speed;
	}
	/* check auto negotiation is complete */
	if (!(mii_status & BMSR_ANEGCOMPLETE)) {
		/* still in autonegotiation - configure nic for 10 MBit HD and wait. */
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
		goto set_speed;
	}

	adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
	dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
		dev->name, adv, lpa);

	retval = 1;
	if (np->gigabit == PHY_GIGABIT) {
		control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);

		if ((control_1000 & ADVERTISE_1000FULL) &&
		    (status_1000 & LPA_1000FULL)) {
			dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
				dev->name);
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
			newdup = 1;
			goto set_speed;
		}
	}

	/* FIXME: handle parallel detection properly */
	adv_lpa = lpa & adv;
	if (adv_lpa & LPA_100FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 1;
	} else if (adv_lpa & LPA_100HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 0;
	} else if (adv_lpa & LPA_10FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 1;
	} else if (adv_lpa & LPA_10HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	} else {
		dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	}

set_speed:
	if (np->duplex == newdup && np->linkspeed == newls)
		return retval;

	dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
		dev->name, np->linkspeed, np->duplex, newls, newdup);

	np->duplex = newdup;
	np->linkspeed = newls;

	/* The transmitter and receiver must be restarted for safe update */
	if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
		txrxFlags |= NV_RESTART_TX;
		nv_stop_tx(dev);
	}
	if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
		txrxFlags |= NV_RESTART_RX;
		nv_stop_rx(dev);
	}

	if (np->gigabit == PHY_GIGABIT) {
		phyreg = readl(base + NvRegSlotTime);
		phyreg &= ~(0x3FF00);
		if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) ||
		    ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100))
			phyreg |= NVREG_SLOTTIME_10_100_FULL;
		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
			phyreg |= NVREG_SLOTTIME_1000_FULL;
		writel(phyreg, base + NvRegSlotTime);
	}

	phyreg = readl(base + NvRegPhyInterface);
	phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
	if (np->duplex == 0)
		phyreg |= PHY_HALF;
	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
		phyreg |= PHY_100;
	else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
		phyreg |= PHY_1000;
	writel(phyreg, base + NvRegPhyInterface);

	phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
	if (phyreg & PHY_RGMII) {
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) {
			txreg = NVREG_TX_DEFERRAL_RGMII_1000;
		} else {
			if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) {
				if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10)
					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10;
				else
					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100;
			} else {
				txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
			}
		}
	} else {
		if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX))
			txreg = NVREG_TX_DEFERRAL_MII_STRETCH;
		else
			txreg = NVREG_TX_DEFERRAL_DEFAULT;
	}
	writel(txreg, base + NvRegTxDeferral);

	if (np->desc_ver == DESC_VER_1) {
		txreg = NVREG_TX_WM_DESC1_DEFAULT;
	} else {
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
			txreg = NVREG_TX_WM_DESC2_3_1000;
		else
			txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
	}
	writel(txreg, base + NvRegTxWatermark);

	writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
	       base + NvRegMisc1);
	pci_push(base);
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);

	pause_flags = 0;
	/* setup pause frame */
	if (np->duplex != 0) {
		if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
			adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
			lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

			switch (adv_pause) {
			case ADVERTISE_PAUSE_CAP:
				if (lpa_pause & LPA_PAUSE_CAP) {
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				break;
			case ADVERTISE_PAUSE_ASYM:
				if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
					pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				break;
			case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
				if (lpa_pause & LPA_PAUSE_CAP) {
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				if (lpa_pause == LPA_PAUSE_ASYM)
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
				break;
			}
		} else {
			pause_flags = np->pause_flags;
		}
	}
	nv_update_pause(dev, pause_flags);

	if (txrxFlags & NV_RESTART_TX)
		nv_start_tx(dev);
	if (txrxFlags & NV_RESTART_RX)
		nv_start_rx(dev);

	return retval;
}
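/*
 * Pause resolution example (restating the switch above with illustrative
 * inputs): if we advertise both symmetric and asymmetric pause
 * (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM) and the partner advertises
 * only asymmetric pause (LPA_PAUSE_ASYM), only rx pause is enabled; if both
 * sides advertise symmetric pause, rx pause is enabled and tx pause is
 * enabled as well when NV_PAUSEFRAME_TX_REQ was requested.
 */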
static void nv_linkchange(struct net_device *dev)
{
	if (nv_update_linkspeed(dev)) {
		if (!netif_carrier_ok(dev)) {
			netif_carrier_on(dev);
			printk(KERN_INFO "%s: link up.\n", dev->name);
			nv_start_rx(dev);
		}
	} else {
		if (netif_carrier_ok(dev)) {
			netif_carrier_off(dev);
			printk(KERN_INFO "%s: link down.\n", dev->name);
			nv_stop_rx(dev);
		}
	}
}
static void nv_link_irq(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 miistat;

	miistat = readl(base + NvRegMIIStatus);
	writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
	dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);

	if (miistat & (NVREG_MIISTAT_LINKCHANGE))
		nv_linkchange(dev);
	dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
}
static void nv_msi_workaround(struct fe_priv *np)
{

	/* Need to toggle the msi irq mask within the ethernet device,
	 * otherwise, future interrupts will not be detected.
	 */
	if (np->msi_flags & NV_MSI_ENABLED) {
		u8 __iomem *base = np->base;

		writel(0, base + NvRegMSIIrqMask);
		writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
	}
}
static irqreturn_t nv_nic_irq(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;

	dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);

	for (i = 0; ; i++) {
		if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
			events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
		} else {
			events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
		}
		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		nv_msi_workaround(np);

		spin_lock(&np->lock);
		nv_tx_done(dev);
		spin_unlock(&np->lock);

#ifdef CONFIG_FORCEDETH_NAPI
		if (events & NVREG_IRQ_RX_ALL) {
			netif_rx_schedule(dev, &np->napi);

			/* Disable further receive irqs */
			spin_lock(&np->lock);
			np->irqmask &= ~NVREG_IRQ_RX_ALL;

			if (np->msi_flags & NV_MSI_X_ENABLED)
				writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			spin_unlock(&np->lock);
		}
#else
		if (nv_rx_process(dev, RX_WORK_PER_LOOP)) {
			if (unlikely(nv_alloc_rx(dev))) {
				spin_lock(&np->lock);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock(&np->lock);
			}
		}
#endif
		if (unlikely(events & NVREG_IRQ_LINK)) {
			spin_lock(&np->lock);
			nv_link_irq(dev);
			spin_unlock(&np->lock);
		}
		if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
			spin_lock(&np->lock);
			nv_linkchange(dev);
			spin_unlock(&np->lock);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
				dev->name, events);
		}
		if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
			       dev->name, events);
		}
		if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
			spin_lock(&np->lock);
			/* disable interrupts on the nic */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(0, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq = np->irqmask;
				np->recover_error = 1;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock(&np->lock);
			break;
		}
		if (unlikely(i > max_interrupt_work)) {
			spin_lock(&np->lock);
			/* disable interrupts on the nic */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(0, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq = np->irqmask;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock(&np->lock);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
			break;
		}

	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);

	return IRQ_RETVAL(i);
}
/*
 * All _optimized functions are used to help increase performance
 * (reduce CPU and increase throughput). They use descriptor version 3,
 * compiler directives, and reduce memory accesses.
 */
static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);

	for (i = 0; ; i++) {
		if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
			events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
		} else {
			events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
		}
		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		nv_msi_workaround(np);

		spin_lock(&np->lock);
		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock(&np->lock);

#ifdef CONFIG_FORCEDETH_NAPI
		if (events & NVREG_IRQ_RX_ALL) {
			netif_rx_schedule(dev, &np->napi);

			/* Disable further receive irqs */
			spin_lock(&np->lock);
			np->irqmask &= ~NVREG_IRQ_RX_ALL;

			if (np->msi_flags & NV_MSI_X_ENABLED)
				writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			spin_unlock(&np->lock);
		}
#else
		if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
			if (unlikely(nv_alloc_rx_optimized(dev))) {
				spin_lock(&np->lock);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock(&np->lock);
			}
		}
#endif
		if (unlikely(events & NVREG_IRQ_LINK)) {
			spin_lock(&np->lock);
			nv_link_irq(dev);
			spin_unlock(&np->lock);
		}
		if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
			spin_lock(&np->lock);
			nv_linkchange(dev);
			spin_unlock(&np->lock);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
				dev->name, events);
		}
		if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
			       dev->name, events);
		}
		if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
			spin_lock(&np->lock);
			/* disable interrupts on the nic */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(0, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq = np->irqmask;
				np->recover_error = 1;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock(&np->lock);
			break;
		}

		if (unlikely(i > max_interrupt_work)) {
			spin_lock(&np->lock);
			/* disable interrupts on the nic */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(0, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq = np->irqmask;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock(&np->lock);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_optimized.\n", dev->name, i);
			break;
		}

	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);

	return IRQ_RETVAL(i);
}
static irqreturn_t nv_nic_irq_tx(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);

	for (i = 0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
		writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
		dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		spin_lock_irqsave(&np->lock, flags);
		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock_irqrestore(&np->lock, flags);

		if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
				dev->name, events);
		}
		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
			break;
		}

	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);

	return IRQ_RETVAL(i);
}
#ifdef CONFIG_FORCEDETH_NAPI
static int nv_napi_poll(struct napi_struct *napi, int budget)
{
	struct fe_priv *np = container_of(napi, struct fe_priv, napi);
	struct net_device *dev = np->dev;
	u8 __iomem *base = get_hwbase(dev);
	unsigned long flags;
	int pkts, retcode;

	if (!nv_optimized(np)) {
		pkts = nv_rx_process(dev, budget);
		retcode = nv_alloc_rx(dev);
	} else {
		pkts = nv_rx_process_optimized(dev, budget);
		retcode = nv_alloc_rx_optimized(dev);
	}

	if (retcode) {
		spin_lock_irqsave(&np->lock, flags);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock_irqrestore(&np->lock, flags);
	}

	if (pkts < budget) {
		/* re-enable receive interrupts */
		spin_lock_irqsave(&np->lock, flags);

		__netif_rx_complete(dev, napi);

		np->irqmask |= NVREG_IRQ_RX_ALL;
		if (np->msi_flags & NV_MSI_X_ENABLED)
			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
		else
			writel(np->irqmask, base + NvRegIrqMask);

		spin_unlock_irqrestore(&np->lock, flags);
	}
	return pkts;
}
#endif
#ifdef CONFIG_FORCEDETH_NAPI
static irqreturn_t nv_nic_irq_rx(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;

	events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
	writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);

	if (events) {
		netif_rx_schedule(dev, &np->napi);
		/* disable receive interrupts on the nic */
		writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
		pci_push(base);
	}
	return IRQ_HANDLED;
}
#else
static irqreturn_t nv_nic_irq_rx(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);

	for (i = 0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
		writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
		dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
			if (unlikely(nv_alloc_rx_optimized(dev))) {
				spin_lock_irqsave(&np->lock, flags);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock_irqrestore(&np->lock, flags);
			}
		}

		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);

	return IRQ_RETVAL(i);
}
#endif
static irqreturn_t nv_nic_irq_other(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);

	for (i = 0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
		writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		/* check tx in case we reached max loop limit in tx isr */
		spin_lock_irqsave(&np->lock, flags);
		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock_irqrestore(&np->lock, flags);

		if (events & NVREG_IRQ_LINK) {
			spin_lock_irqsave(&np->lock, flags);
			nv_link_irq(dev);
			spin_unlock_irqrestore(&np->lock, flags);
		}
		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
			spin_lock_irqsave(&np->lock, flags);
			nv_linkchange(dev);
			spin_unlock_irqrestore(&np->lock, flags);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (events & NVREG_IRQ_RECOVER_ERROR) {
			spin_lock_irq(&np->lock);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_OTHER;
				np->recover_error = 1;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irq(&np->lock);
			break;
		}
		if (events & (NVREG_IRQ_UNKNOWN)) {
			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
			       dev->name, events);
		}
		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_OTHER;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
			break;
		}

	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);

	return IRQ_RETVAL(i);
}
static irqreturn_t nv_nic_irq_test(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);

	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
		writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
	} else {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
		writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
	}
	pci_push(base);
	dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
	if (!(events & NVREG_IRQ_TIMER))
		return IRQ_RETVAL(0);

	nv_msi_workaround(np);

	spin_lock(&np->lock);
	np->intr_test = 1;
	spin_unlock(&np->lock);

	dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);

	return IRQ_RETVAL(1);
}
static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
{
	u8 __iomem *base = get_hwbase(dev);
	int i;
	u32 msixmap = 0;

	/* Each interrupt bit can be mapped to a MSIX vector (4 bits).
	 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
	 * the remaining 8 interrupts.
	 */
	for (i = 0; i < 8; i++) {
		if ((irqmask >> i) & 0x1)
			msixmap |= vector << (i << 2);
	}
	writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);

	msixmap = 0;
	for (i = 0; i < 8; i++) {
		if ((irqmask >> (i + 8)) & 0x1)
			msixmap |= vector << (i << 2);
	}
	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
}
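/*
 * Worked example for set_msix_vector_map (illustrative): with
 * irqmask = 0x0003 (interrupt bits 0 and 1) and vector = 2, the first loop
 * builds msixmap = (2 << 0) | (2 << 4) = 0x22, i.e. one 4-bit vector number
 * per interrupt bit, and ORs it into NvRegMSIXMap0.
 */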
static int nv_request_irq(struct net_device *dev, int intr_test)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int i;
	irqreturn_t (*handler)(int foo, void *data);

	if (intr_test) {
		handler = nv_nic_irq_test;
	} else {
		if (nv_optimized(np))
			handler = nv_nic_irq_optimized;
		else
			handler = nv_nic_irq;
	}

	if (np->msi_flags & NV_MSI_X_CAPABLE) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
			np->msi_x_entry[i].entry = i;
		}
		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
			np->msi_flags |= NV_MSI_X_ENABLED;
			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
				/* Request irq for rx handling */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_err;
				}
				/* Request irq for tx handling */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_free_rx;
				}
				/* Request irq for link and timer handling */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_free_tx;
				}
				/* map interrupts to their respective vector */
				writel(0, base + NvRegMSIXMap0);
				writel(0, base + NvRegMSIXMap1);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
			} else {
				/* Request irq for all interrupts */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_err;
				}

				/* map interrupts to vector 0 */
				writel(0, base + NvRegMSIXMap0);
				writel(0, base + NvRegMSIXMap1);
			}
		}
	}
	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
			np->msi_flags |= NV_MSI_ENABLED;
			dev->irq = np->pci_dev->irq;
			if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
				pci_disable_msi(np->pci_dev);
				np->msi_flags &= ~NV_MSI_ENABLED;
				dev->irq = np->pci_dev->irq;
				goto out_err;
			}

			/* map interrupts to vector 0 */
			writel(0, base + NvRegMSIMap0);
			writel(0, base + NvRegMSIMap1);
			/* enable msi vector 0 */
			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
		}
	}
	if (ret != 0) {
		if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
			goto out_err;
	}

	return 0;
out_free_tx:
	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
out_free_rx:
	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
out_err:
	return 1;
}

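/*
 * Summary (for orientation, derived from the code above): nv_request_irq()
 * degrades gracefully - MSI-X first (per-event vectors in throughput mode,
 * one shared vector otherwise), then plain MSI, then the legacy INTx line.
 * Every failure path calls the matching pci_disable_msix()/pci_disable_msi()
 * so no partial setup is left behind.
 */
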
static void nv_free_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	int i;

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
			free_irq(np->msi_x_entry[i].vector, dev);
		}
		pci_disable_msix(np->pci_dev);
		np->msi_flags &= ~NV_MSI_X_ENABLED;
	} else {
		free_irq(np->pci_dev->irq, dev);
		if (np->msi_flags & NV_MSI_ENABLED) {
			pci_disable_msi(np->pci_dev);
			np->msi_flags &= ~NV_MSI_ENABLED;
		}
	}
}

static void nv_do_nic_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 mask = 0;

	/*
	 * First disable irq(s) and then
	 * reenable interrupts on the nic, we have to do this before calling
	 * nv_nic_irq because that may decide to do otherwise
	 */

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq_lockdep(np->pci_dev->irq);
		mask = np->irqmask;
	} else {
		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
			mask |= NVREG_IRQ_RX_ALL;
		}
		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
			mask |= NVREG_IRQ_TX_ALL;
		}
		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
			mask |= NVREG_IRQ_OTHER;
		}
	}
	np->nic_poll_irq = 0;

	/* disable_irq() contains synchronize_irq, thus no irq handler can run now */

	if (np->recover_error) {
		np->recover_error = 0;
		printk(KERN_INFO "forcedeth: MAC in recoverable error state\n");
		if (netif_running(dev)) {
			netif_tx_lock_bh(dev);
			netif_addr_lock(dev);
			spin_lock(&np->lock);
			/* stop engines */
			nv_stop_rxtx(dev);
			nv_txrx_reset(dev);
			/* drain rx queue */
			nv_drain_rxtx(dev);
			/* reinit driver view of the rx queue */
			set_bufsize(dev);
			if (nv_init_ring(dev)) {
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			}
			/* reinit nic view of the rx queue */
			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
			writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
				base + NvRegRingSizes);
			pci_push(base);
			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
			pci_push(base);

			/* restart rx engine */
			nv_start_rxtx(dev);
			spin_unlock(&np->lock);
			netif_addr_unlock(dev);
			netif_tx_unlock_bh(dev);
		}
	}

	writel(mask, base + NvRegIrqMask);
	pci_push(base);

	if (!using_multi_irqs(dev)) {
		if (nv_optimized(np))
			nv_nic_irq_optimized(0, dev);
		else
			nv_nic_irq(0, dev);
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq_lockdep(np->pci_dev->irq);
	} else {
		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
			nv_nic_irq_rx(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		}
		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
			nv_nic_irq_tx(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		}
		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
			nv_nic_irq_other(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
		}
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void nv_poll_controller(struct net_device *dev)
{
	nv_do_nic_poll((unsigned long) dev);
}
#endif

static void nv_do_stats_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	nv_get_hw_stats(dev);

	if (!np->in_shutdown)
		mod_timer(&np->stats_poll,
			round_jiffies(jiffies + STATS_INTERVAL));
}

static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct fe_priv *np = netdev_priv(dev);
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, FORCEDETH_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}

static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);
	wolinfo->supported = WAKE_MAGIC;

	spin_lock_irq(&np->lock);
	if (np->wolenabled)
		wolinfo->wolopts = WAKE_MAGIC;
	spin_unlock_irq(&np->lock);
}

static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 flags = 0;

	if (wolinfo->wolopts == 0) {
		np->wolenabled = 0;
	} else if (wolinfo->wolopts & WAKE_MAGIC) {
		np->wolenabled = 1;
		flags = NVREG_WAKEUPFLAGS_ENABLE;
	}
	if (netif_running(dev)) {
		spin_lock_irq(&np->lock);
		writel(flags, base + NvRegWakeUpFlags);
		spin_unlock_irq(&np->lock);
	}
	return 0;
}

static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);
	int adv;

	spin_lock_irq(&np->lock);
	ecmd->port = PORT_MII;
	if (!netif_running(dev)) {
		/* We do not track link speed / duplex setting if the
		 * interface is disabled. Force a link check */
		if (nv_update_linkspeed(dev)) {
			if (!netif_carrier_ok(dev))
				netif_carrier_on(dev);
		} else {
			if (netif_carrier_ok(dev))
				netif_carrier_off(dev);
		}
	}

	if (netif_carrier_ok(dev)) {
		switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
		case NVREG_LINKSPEED_10:
			ecmd->speed = SPEED_10;
			break;
		case NVREG_LINKSPEED_100:
			ecmd->speed = SPEED_100;
			break;
		case NVREG_LINKSPEED_1000:
			ecmd->speed = SPEED_1000;
			break;
		}
		ecmd->duplex = DUPLEX_HALF;
		if (np->duplex)
			ecmd->duplex = DUPLEX_FULL;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	ecmd->autoneg = np->autoneg;

	ecmd->advertising = ADVERTISED_MII;
	if (np->autoneg) {
		ecmd->advertising |= ADVERTISED_Autoneg;
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		if (adv & ADVERTISE_10HALF)
			ecmd->advertising |= ADVERTISED_10baseT_Half;
		if (adv & ADVERTISE_10FULL)
			ecmd->advertising |= ADVERTISED_10baseT_Full;
		if (adv & ADVERTISE_100HALF)
			ecmd->advertising |= ADVERTISED_100baseT_Half;
		if (adv & ADVERTISE_100FULL)
			ecmd->advertising |= ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			if (adv & ADVERTISE_1000FULL)
				ecmd->advertising |= ADVERTISED_1000baseT_Full;
		}
	}
	ecmd->supported = (SUPPORTED_Autoneg |
		SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		SUPPORTED_MII);
	if (np->gigabit == PHY_GIGABIT)
		ecmd->supported |= SUPPORTED_1000baseT_Full;

	ecmd->phy_address = np->phyaddr;
	ecmd->transceiver = XCVR_EXTERNAL;

	/* ignore maxtxpkt, maxrxpkt for now */
	spin_unlock_irq(&np->lock);
	return 0;
}

static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);

	if (ecmd->port != PORT_MII)
		return -EINVAL;
	if (ecmd->transceiver != XCVR_EXTERNAL)
		return -EINVAL;
	if (ecmd->phy_address != np->phyaddr) {
		/* TODO: support switching between multiple phys. Should be
		 * trivial, but not enabled due to lack of test hardware. */
		return -EINVAL;
	}
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		u32 mask;

		mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT)
			mask |= ADVERTISED_1000baseT_Full;

		if ((ecmd->advertising & mask) == 0)
			return -EINVAL;

	} else if (ecmd->autoneg == AUTONEG_DISABLE) {
		/* Note: autonegotiation disable, speed 1000 intentionally
		 * forbidden - noone should need that. */

		if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
			return -EINVAL;
		if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	netif_carrier_off(dev);
	if (netif_running(dev)) {
		unsigned long flags;

		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		/* with plain spinlock lockdep complains */
		spin_lock_irqsave(&np->lock, flags);
		/* stop engines */
		/* FIXME:
		 * this can take some time, and interrupts are disabled
		 * due to spin_lock_irqsave, but let's hope no daemon
		 * is going to change the settings very often...
		 * Worst case:
		 * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX
		 * + some minor delays, which is up to a second approximately
		 */
		nv_stop_rxtx(dev);
		spin_unlock_irqrestore(&np->lock, flags);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
	}

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		int adv, bmcr;

		np->autoneg = 1;

		/* advertise only what has been requested */
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (ecmd->advertising & ADVERTISED_10baseT_Half)
			adv |= ADVERTISE_10HALF;
		if (ecmd->advertising & ADVERTISED_10baseT_Full)
			adv |= ADVERTISE_10FULL;
		if (ecmd->advertising & ADVERTISED_100baseT_Half)
			adv |= ADVERTISE_100HALF;
		if (ecmd->advertising & ADVERTISED_100baseT_Full)
			adv |= ADVERTISE_100FULL;
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ)  /* for rx we set both advertisments but disable tx pause */
			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
			adv |=  ADVERTISE_PAUSE_ASYM;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			if (ecmd->advertising & ADVERTISED_1000baseT_Full)
				adv |= ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
		}

		if (netif_running(dev))
			printk(KERN_INFO "%s: link down.\n", dev->name);
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
			bmcr |= BMCR_ANENABLE;
			/* reset the phy in order for settings to stick,
			 * and cause autoneg to start */
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
		}
	} else {
		int adv, bmcr;

		np->autoneg = 0;

		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_10HALF;
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_10FULL;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_100HALF;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_100FULL;
		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisments but disable tx pause */
			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		}
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
			adv |=  ADVERTISE_PAUSE_ASYM;
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
		}
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
		np->fixed_mode = adv;

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
		if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
			bmcr |= BMCR_FULLDPLX;
		if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
			bmcr |= BMCR_SPEED100;
		if (np->phy_oui == PHY_OUI_MARVELL) {
			/* reset the phy in order for forced mode settings to stick */
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
			if (netif_running(dev)) {
				/* Wait a bit and then reconfigure the nic. */
				udelay(10);
				nv_linkchange(dev);
			}
		}
	}

	if (netif_running(dev)) {
		nv_start_rxtx(dev);
		nv_enable_irq(dev);
	}

	return 0;
}

#define FORCEDETH_REGS_VER	1

static int nv_get_regs_len(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	return np->register_size;
}

static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 *rbuf = buf;
	int i;

	regs->version = FORCEDETH_REGS_VER;
	spin_lock_irq(&np->lock);
	/* copy exactly register_size bytes; the buffer is sized by
	 * nv_get_regs_len(), so the bound must be exclusive */
	for (i = 0; i < np->register_size/sizeof(u32); i++)
		rbuf[i] = readl(base + i*sizeof(u32));
	spin_unlock_irq(&np->lock);
}

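/*
 * Usage note (assumption about the tooling, not taken from this file): the
 * dump above is what userspace retrieves with "ethtool -d <iface>", sized by
 * nv_get_regs_len() and versioned by FORCEDETH_REGS_VER.
 */
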
static int nv_nway_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int ret;

	if (np->autoneg) {
		int bmcr;

		netif_carrier_off(dev);
		if (netif_running(dev)) {
			nv_disable_irq(dev);
			netif_tx_lock_bh(dev);
			netif_addr_lock(dev);
			spin_lock(&np->lock);
			/* stop engines */
			nv_stop_rxtx(dev);
			spin_unlock(&np->lock);
			netif_addr_unlock(dev);
			netif_tx_unlock_bh(dev);
			printk(KERN_INFO "%s: link down.\n", dev->name);
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
			bmcr |= BMCR_ANENABLE;
			/* reset the phy in order for settings to stick*/
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
		}

		if (netif_running(dev)) {
			nv_start_rxtx(dev);
			nv_enable_irq(dev);
		}
		ret = 0;
	} else {
		ret = -EINVAL;
	}

	return ret;
}

static int nv_set_tso(struct net_device *dev, u32 value)
{
	struct fe_priv *np = netdev_priv(dev);

	if ((np->driver_data & DEV_HAS_CHECKSUM))
		return ethtool_op_set_tso(dev, value);
	else
		return -EOPNOTSUPP;
}

static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
{
	struct fe_priv *np = netdev_priv(dev);

	ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;

	ring->rx_pending = np->rx_ring_size;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
	ring->tx_pending = np->tx_ring_size;
}

static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
	dma_addr_t ring_addr;

	if (ring->rx_pending < RX_RING_MIN ||
	    ring->tx_pending < TX_RING_MIN ||
	    ring->rx_mini_pending != 0 ||
	    ring->rx_jumbo_pending != 0 ||
	    (np->desc_ver == DESC_VER_1 &&
	     (ring->rx_pending > RING_MAX_DESC_VER_1 ||
	      ring->tx_pending > RING_MAX_DESC_VER_1)) ||
	    (np->desc_ver != DESC_VER_1 &&
	     (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
	      ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
		return -EINVAL;
	}

	/* allocate new rings */
	if (!nv_optimized(np)) {
		rxtx_ring = pci_alloc_consistent(np->pci_dev,
					    sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
					    &ring_addr);
	} else {
		rxtx_ring = pci_alloc_consistent(np->pci_dev,
					    sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
					    &ring_addr);
	}
	rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
	tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
	if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
		/* fall back to old rings */
		if (!nv_optimized(np)) {
			if (rxtx_ring)
				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
						    rxtx_ring, ring_addr);
		} else {
			if (rxtx_ring)
				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
						    rxtx_ring, ring_addr);
		}
		if (rx_skbuff)
			kfree(rx_skbuff);
		if (tx_skbuff)
			kfree(tx_skbuff);
		goto exit;
	}

	if (netif_running(dev)) {
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rxtx(dev);
		nv_txrx_reset(dev);
		/* drain queues */
		nv_drain_rxtx(dev);
		/* delete queues */
		free_rings(dev);
	}

	/* set new values */
	np->rx_ring_size = ring->rx_pending;
	np->tx_ring_size = ring->tx_pending;

	if (!nv_optimized(np)) {
		np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
	np->rx_skb = (struct nv_skb_map*)rx_skbuff;
	np->tx_skb = (struct nv_skb_map*)tx_skbuff;
	np->ring_addr = ring_addr;

	memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
	memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);

	if (netif_running(dev)) {
		/* reinit driver view of the queues */
		set_bufsize(dev);
		if (nv_init_ring(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}

		/* reinit nic view of the queues */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
			base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);

		/* restart engines */
		nv_start_rxtx(dev);
		spin_unlock(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
		nv_enable_irq(dev);
	}
	return 0;
exit:
	return -ENOMEM;
}

static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
{
	struct fe_priv *np = netdev_priv(dev);

	pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
	pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
	pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
}

static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
{
	struct fe_priv *np = netdev_priv(dev);
	int adv, bmcr;

	if ((!np->autoneg && np->duplex == 0) ||
	    (np->autoneg && !pause->autoneg && np->duplex == 0)) {
		printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n",
		       dev->name);
		return -EINVAL;
	}
	if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
		printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name);
		return -EINVAL;
	}

	netif_carrier_off(dev);
	if (netif_running(dev)) {
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rxtx(dev);
		spin_unlock(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
	}

	np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
	if (pause->rx_pause)
		np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
	if (pause->tx_pause)
		np->pause_flags |= NV_PAUSEFRAME_TX_REQ;

	if (np->autoneg && pause->autoneg) {
		np->pause_flags |= NV_PAUSEFRAME_AUTONEG;

		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisments but disable tx pause */
			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
			adv |=  ADVERTISE_PAUSE_ASYM;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

		if (netif_running(dev))
			printk(KERN_INFO "%s: link down.\n", dev->name);
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
	} else {
		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
		if (pause->rx_pause)
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		if (pause->tx_pause)
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;

		if (!netif_running(dev))
			nv_update_linkspeed(dev);
		else
			nv_update_pause(dev, np->pause_flags);
	}

	if (netif_running(dev)) {
		nv_start_rxtx(dev);
		nv_enable_irq(dev);
	}
	return 0;
}

static u32 nv_get_rx_csum(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	return (np->rx_csum) != 0;
}

static int nv_set_rx_csum(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int retcode = 0;

	if (np->driver_data & DEV_HAS_CHECKSUM) {
		if (data) {
			np->rx_csum = 1;
			np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		} else {
			np->rx_csum = 0;
			/* vlan is dependent on rx checksum offload */
			if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
				np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
		}
		if (netif_running(dev)) {
			spin_lock_irq(&np->lock);
			writel(np->txrxctl_bits, base + NvRegTxRxControl);
			spin_unlock_irq(&np->lock);
		}
	} else {
		return -EINVAL;
	}

	return retcode;
}

static int nv_set_tx_csum(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_CHECKSUM)
		return ethtool_op_set_tx_hw_csum(dev, data);
	else
		return -EOPNOTSUPP;
}

static int nv_set_sg(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_CHECKSUM)
		return ethtool_op_set_sg(dev, data);
	else
		return -EOPNOTSUPP;
}

static int nv_get_sset_count(struct net_device *dev, int sset)
{
	struct fe_priv *np = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_TEST:
		if (np->driver_data & DEV_HAS_TEST_EXTENDED)
			return NV_TEST_COUNT_EXTENDED;
		else
			return NV_TEST_COUNT_BASE;
	case ETH_SS_STATS:
		if (np->driver_data & DEV_HAS_STATISTICS_V1)
			return NV_DEV_STATISTICS_V1_COUNT;
		else if (np->driver_data & DEV_HAS_STATISTICS_V2)
			return NV_DEV_STATISTICS_V2_COUNT;
		else
			return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
{
	struct fe_priv *np = netdev_priv(dev);

	/* update stats */
	nv_do_stats_poll((unsigned long)dev);

	memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
}

static int nv_link_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int mii_status;

	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

	/* check phy link status */
	if (!(mii_status & BMSR_LSTATUS))
		return 0;
	else
		return 1;
}

static int nv_register_test(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	int i = 0;
	u32 orig_read, new_read;

	do {
		orig_read = readl(base + nv_registers_test[i].reg);

		/* xor with mask to toggle bits */
		orig_read ^= nv_registers_test[i].mask;

		writel(orig_read, base + nv_registers_test[i].reg);

		new_read = readl(base + nv_registers_test[i].reg);

		if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
			return 0;

		/* restore original value */
		orig_read ^= nv_registers_test[i].mask;
		writel(orig_read, base + nv_registers_test[i].reg);

	} while (nv_registers_test[++i].reg != 0);

	return 1;
}

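/*
 * Worked example (illustration only): with orig_read = 0xA5 and mask = 0x0F,
 * the first XOR writes 0xAA; the test fails if the masked bits do not take
 * the toggled value. The second XOR (0xAA ^ 0x0F = 0xA5) restores the
 * original contents, so the probe is side-effect free on working hardware.
 */
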
static int nv_interrupt_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int testcnt;
	u32 save_msi_flags, save_poll_interval = 0;

	if (netif_running(dev)) {
		/* free current irq */
		nv_free_irq(dev);
		save_poll_interval = readl(base+NvRegPollingInterval);
	}

	/* flag to test interrupt handler */
	np->intr_test = 0;

	/* setup test irq */
	save_msi_flags = np->msi_flags;
	np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
	np->msi_flags |= 0x001; /* setup 1 vector */
	if (nv_request_irq(dev, 1))
		return 0;

	/* setup timer interrupt */
	writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);

	nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);

	/* wait for at least one interrupt */
	msleep(100);

	spin_lock_irq(&np->lock);

	/* flag should be set within ISR */
	testcnt = np->intr_test;
	if (!testcnt)
		ret = 2;

	nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
	if (!(np->msi_flags & NV_MSI_X_ENABLED))
		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	else
		writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	np->msi_flags = save_msi_flags;

	if (netif_running(dev)) {
		writel(save_poll_interval, base + NvRegPollingInterval);
		writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
		/* restore original irq */
		if (nv_request_irq(dev, 0))
			return 0;
	}

	return ret;
}

static int nv_loopback_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	struct sk_buff *tx_skb, *rx_skb;
	dma_addr_t test_dma_addr;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	u32 flags;
	int len, i, pkt_len;
	u8 *pkt_data;
	u32 filter_flags = 0;
	u32 misc1_flags = 0;
	int ret = 1;

	if (netif_running(dev)) {
		nv_disable_irq(dev);
		filter_flags = readl(base + NvRegPacketFilterFlags);
		misc1_flags = readl(base + NvRegMisc1);
	} else {
		nv_txrx_reset(dev);
	}

	/* reinit driver view of the rx queue */
	set_bufsize(dev);
	nv_init_ring(dev);

	/* setup hardware for loopback */
	writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
	writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);

	/* reinit nic view of the rx queue */
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
	writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
		base + NvRegRingSizes);
	pci_push(base);

	/* restart rx engine */
	nv_start_rxtx(dev);

	/* setup packet for tx */
	pkt_len = ETH_DATA_LEN;
	tx_skb = dev_alloc_skb(pkt_len);
	if (!tx_skb) {
		printk(KERN_ERR "dev_alloc_skb() failed during loopback test"
			 " of %s\n", dev->name);
		ret = 0;
		goto out;
	}
	test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
				       skb_tailroom(tx_skb),
				       PCI_DMA_FROMDEVICE);
	pkt_data = skb_put(tx_skb, pkt_len);
	for (i = 0; i < pkt_len; i++)
		pkt_data[i] = (u8)(i & 0xff);

	if (!nv_optimized(np)) {
		np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
		np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
	} else {
		np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr));
		np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr));
		np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
	}
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	pci_push(get_hwbase(dev));

	msleep(500);

	/* check for rx of the packet */
	if (!nv_optimized(np)) {
		flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
		len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
	} else {
		flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
		len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
	}

	if (flags & NV_RX_AVAIL) {
		ret = 0;
	} else if (np->desc_ver == DESC_VER_1) {
		if (flags & NV_RX_ERROR)
			ret = 0;
	} else {
		if (flags & NV_RX2_ERROR) {
			ret = 0;
		}
	}

	if (ret) {
		if (len != pkt_len) {
			ret = 0;
			dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
				dev->name, len, pkt_len);
		} else {
			rx_skb = np->rx_skb[0].skb;
			for (i = 0; i < pkt_len; i++) {
				if (rx_skb->data[i] != (u8)(i & 0xff)) {
					ret = 0;
					dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
						dev->name, i);
					break;
				}
			}
		}
	} else {
		dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
	}

	pci_unmap_page(np->pci_dev, test_dma_addr,
		       (skb_end_pointer(tx_skb) - tx_skb->data),
		       PCI_DMA_TODEVICE);
	dev_kfree_skb_any(tx_skb);
 out:
	/* stop engines */
	nv_stop_rxtx(dev);
	nv_txrx_reset(dev);
	/* drain rx queue */
	nv_drain_rxtx(dev);

	if (netif_running(dev)) {
		writel(misc1_flags, base + NvRegMisc1);
		writel(filter_flags, base + NvRegPacketFilterFlags);
		nv_enable_irq(dev);
	}

	return ret;
}

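/*
 * Sequence summary (restating the code above): force the MAC into loopback
 * (NVREG_MISC1_FORCE plus NVREG_PFF_LOOPBACK), queue one ETH_DATA_LEN frame
 * whose payload is the byte ramp 00 01 02 ... ff 00 01 ..., wait, then check
 * the rx descriptor length and compare the received bytes against the ramp.
 */
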
static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int result;
	memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64));

	if (!nv_link_test(dev)) {
		test->flags |= ETH_TEST_FL_FAILED;
		buffer[0] = 1;
	}

	if (test->flags & ETH_TEST_FL_OFFLINE) {
		if (netif_running(dev)) {
			netif_stop_queue(dev);
#ifdef CONFIG_FORCEDETH_NAPI
			napi_disable(&np->napi);
#endif
			netif_tx_lock_bh(dev);
			netif_addr_lock(dev);
			spin_lock_irq(&np->lock);
			nv_disable_hw_interrupts(dev, np->irqmask);
			if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
			} else {
				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
			}
			/* stop engines */
			nv_stop_rxtx(dev);
			nv_txrx_reset(dev);
			/* drain rx queue */
			nv_drain_rxtx(dev);
			spin_unlock_irq(&np->lock);
			netif_addr_unlock(dev);
			netif_tx_unlock_bh(dev);
		}

		if (!nv_register_test(dev)) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[1] = 1;
		}

		result = nv_interrupt_test(dev);
		if (result != 1) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[2] = 1;
		}
		if (result == 0) {
			/* bail out */
			return;
		}

		if (!nv_loopback_test(dev)) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[3] = 1;
		}

		if (netif_running(dev)) {
			/* reinit driver view of the rx queue */
			set_bufsize(dev);
			if (nv_init_ring(dev)) {
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			}
			/* reinit nic view of the rx queue */
			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
			writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
				base + NvRegRingSizes);
			pci_push(base);
			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
			pci_push(base);
			/* restart rx engine */
			nv_start_rxtx(dev);
			netif_start_queue(dev);
#ifdef CONFIG_FORCEDETH_NAPI
			napi_enable(&np->napi);
#endif
			nv_enable_hw_interrupts(dev, np->irqmask);
		}
	}
}

static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str));
		break;
	case ETH_SS_TEST:
		memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str));
		break;
	}
}

static const struct ethtool_ops ops = {
	.get_drvinfo = nv_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_wol = nv_get_wol,
	.set_wol = nv_set_wol,
	.get_settings = nv_get_settings,
	.set_settings = nv_set_settings,
	.get_regs_len = nv_get_regs_len,
	.get_regs = nv_get_regs,
	.nway_reset = nv_nway_reset,
	.set_tso = nv_set_tso,
	.get_ringparam = nv_get_ringparam,
	.set_ringparam = nv_set_ringparam,
	.get_pauseparam = nv_get_pauseparam,
	.set_pauseparam = nv_set_pauseparam,
	.get_rx_csum = nv_get_rx_csum,
	.set_rx_csum = nv_set_rx_csum,
	.set_tx_csum = nv_set_tx_csum,
	.set_sg = nv_set_sg,
	.get_strings = nv_get_strings,
	.get_ethtool_stats = nv_get_ethtool_stats,
	.get_sset_count = nv_get_sset_count,
	.self_test = nv_self_test,
};

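/*
 * Note: this table is attached to the net_device in nv_probe() via
 * SET_ETHTOOL_OPS(dev, &ops), which routes the ethtool ioctls to the
 * handlers above.
 */
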
static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct fe_priv *np = get_nvpriv(dev);

	spin_lock_irq(&np->lock);

	/* save vlan group */
	np->vlangrp = grp;

	if (grp) {
		/* enable vlan on MAC */
		np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
	} else {
		/* disable vlan on MAC */
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
	}

	writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);

	spin_unlock_irq(&np->lock);
}

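/*
 * Note: hardware vlan stripping depends on rx checksum offload staying on;
 * nv_set_rx_csum() above only clears NVREG_TXRXCTL_RXCHECK when
 * NVREG_VLANCONTROL_ENABLE is not set, so the two features cannot be
 * disabled independently.
 */
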
/* The mgmt unit and driver use a semaphore to access the phy during init */
static int nv_mgmt_acquire_sema(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	int i;
	u32 tx_ctrl, mgmt_sema;

	for (i = 0; i < 10; i++) {
		mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
		if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
			break;
		msleep(500);
	}

	if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
		return 0;

	for (i = 0; i < 2; i++) {
		tx_ctrl = readl(base + NvRegTransmitterControl);
		tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
		writel(tx_ctrl, base + NvRegTransmitterControl);

		/* verify that semaphore was acquired */
		tx_ctrl = readl(base + NvRegTransmitterControl);
		if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
		    ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE))
			return 1;
		else
			udelay(50);
	}

	return 0;
}

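/*
 * Handshake summary (for orientation): poll up to ten times, 500 ms apart,
 * for the management unit to release its semaphore, then try twice to latch
 * NVREG_XMITCTL_HOST_SEMA_ACQ and read it back. Acquisition only counts when
 * the host bits stuck while the mgmt bits still read as free.
 */
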
static int nv_open(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int oom, i;
	u32 low;

	dprintk(KERN_DEBUG "nv_open: begin\n");

	/* erase previous misconfiguration */
	if (np->driver_data & DEV_HAS_POWER_CNTRL)
		nv_mac_reset(dev);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
	writel(0, base + NvRegPacketFilterFlags);

	writel(0, base + NvRegTransmitterControl);
	writel(0, base + NvRegReceiverControl);

	writel(0, base + NvRegAdapterControl);

	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
		writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);

	/* initialize descriptor rings */
	set_bufsize(dev);
	oom = nv_init_ring(dev);

	writel(0, base + NvRegLinkSpeed);
	writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
	nv_txrx_reset(dev);
	writel(0, base + NvRegUnknownSetupReg6);

	np->in_shutdown = 0;

	/* give hw rings */
	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
	writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
		base + NvRegRingSizes);

	writel(np->linkspeed, base + NvRegLinkSpeed);
	if (np->desc_ver == DESC_VER_1)
		writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
	else
		writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
	writel(np->txrxctl_bits, base + NvRegTxRxControl);
	writel(np->vlanctl_bits, base + NvRegVlanControl);
	pci_push(base);
	writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
	reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
			NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
			KERN_INFO "open: SetupReg5, Bit 31 remained off\n");

	writel(0, base + NvRegMIIMask);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);

	writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
	writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
	writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);

	writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);

	get_random_bytes(&low, sizeof(low));
	low &= NVREG_SLOTTIME_MASK;
	if (np->desc_ver == DESC_VER_1) {
		writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime);
	} else {
		if (!(np->driver_data & DEV_HAS_GEAR_MODE)) {
			/* setup legacy backoff */
			writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime);
		} else {
			writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime);
			nv_gear_backoff_reseed(dev);
		}
	}
	writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
	writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
	if (poll_interval == -1) {
		if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
			writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
		else
			writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	} else
		writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
			base + NvRegAdapterControl);
	writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
	writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
	if (np->wolenabled)
		writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);

	i = readl(base + NvRegPowerState);
	if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0)
		writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);

	pci_push(base);
	udelay(10);
	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);

	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	pci_push(base);

	if (nv_request_irq(dev, 0)) {
		goto out_drain;
	}

	/* ask for interrupts */
	nv_enable_hw_interrupts(dev, np->irqmask);

	spin_lock_irq(&np->lock);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
	writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
	/* One manual link speed update: Interrupts are enabled, future link
	 * speed changes cause interrupts and are handled by nv_link_irq().
	 */
	{
		u32 miistat;
		miistat = readl(base + NvRegMIIStatus);
		writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
		dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
	}
	/* set linkspeed to invalid value, thus force nv_update_linkspeed
	 * to init hw */
	np->linkspeed = 0;
	ret = nv_update_linkspeed(dev);
	nv_start_rxtx(dev);
	netif_start_queue(dev);
#ifdef CONFIG_FORCEDETH_NAPI
	napi_enable(&np->napi);
#endif
	if (ret) {
		netif_carrier_on(dev);
	} else {
		printk(KERN_INFO "%s: no link during initialization.\n", dev->name);
		netif_carrier_off(dev);
	}
	if (oom)
		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);

	/* start statistics timer */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2))
		mod_timer(&np->stats_poll,
			round_jiffies(jiffies + STATS_INTERVAL));

	spin_unlock_irq(&np->lock);

	return 0;
out_drain:
	nv_drain_rxtx(dev);
	return ret;
}

static int nv_close(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base;

	spin_lock_irq(&np->lock);
	np->in_shutdown = 1;
	spin_unlock_irq(&np->lock);
#ifdef CONFIG_FORCEDETH_NAPI
	napi_disable(&np->napi);
#endif
	synchronize_irq(np->pci_dev->irq);

	del_timer_sync(&np->oom_kick);
	del_timer_sync(&np->nic_poll);
	del_timer_sync(&np->stats_poll);

	netif_stop_queue(dev);
	spin_lock_irq(&np->lock);
	nv_stop_rxtx(dev);
	nv_txrx_reset(dev);

	/* disable interrupts on the nic or we will lock up */
	base = get_hwbase(dev);
	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	nv_drain_rxtx(dev);

	if (np->wolenabled) {
		writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
		nv_start_rx(dev);
	}

	/* FIXME: power down nic */

	return 0;
}

static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
	struct net_device *dev;
	struct fe_priv *np;
	unsigned long addr;
	u8 __iomem *base;
	int err, i;
	u32 powerstate, txreg;
	u32 phystate_orig = 0, phystate;
	int phyinitialized = 0;
	DECLARE_MAC_BUF(mac);
	static int printed_version;

	if (!printed_version++)
		printk(KERN_INFO "%s: Reverse Engineered nForce ethernet"
		       " driver. Version %s.\n", DRV_NAME, FORCEDETH_VERSION);

	dev = alloc_etherdev(sizeof(struct fe_priv));
	err = -ENOMEM;
	if (!dev)
		goto out;

	np = netdev_priv(dev);
	np->dev = dev;
	np->pci_dev = pci_dev;
	spin_lock_init(&np->lock);
	SET_NETDEV_DEV(dev, &pci_dev->dev);

	init_timer(&np->oom_kick);
	np->oom_kick.data = (unsigned long) dev;
	np->oom_kick.function = &nv_do_rx_refill;	/* timer handler */
	init_timer(&np->nic_poll);
	np->nic_poll.data = (unsigned long) dev;
	np->nic_poll.function = &nv_do_nic_poll;	/* timer handler */
	init_timer(&np->stats_poll);
	np->stats_poll.data = (unsigned long) dev;
	np->stats_poll.function = &nv_do_stats_poll;	/* timer handler */

	err = pci_enable_device(pci_dev);
	if (err)
		goto out_free;

	pci_set_master(pci_dev);

	err = pci_request_regions(pci_dev, DRV_NAME);
	if (err < 0)
		goto out_disable;

	if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2))
		np->register_size = NV_PCI_REGSZ_VER3;
	else if (id->driver_data & DEV_HAS_STATISTICS_V1)
		np->register_size = NV_PCI_REGSZ_VER2;
	else
		np->register_size = NV_PCI_REGSZ_VER1;

	err = -EINVAL;
	addr = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
				pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
				pci_resource_len(pci_dev, i),
				pci_resource_flags(pci_dev, i));
		if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
				pci_resource_len(pci_dev, i) >= np->register_size) {
			addr = pci_resource_start(pci_dev, i);
			break;
		}
	}
	if (i == DEVICE_COUNT_RESOURCE) {
		dev_printk(KERN_INFO, &pci_dev->dev,
			   "Couldn't find register window\n");
		goto out_relreg;
	}

	/* copy of driver data */
	np->driver_data = id->driver_data;
	/* copy of device id */
	np->device_id = id->device;

	/* handle different descriptor versions */
	if (id->driver_data & DEV_HAS_HIGH_DMA) {
		/* packet format 3: supports 40-bit addressing */
		np->desc_ver = DESC_VER_3;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
		if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK))
			dev_printk(KERN_INFO, &pci_dev->dev,
				"64-bit DMA failed, using 32-bit addressing\n");
		else
			dev->features |= NETIF_F_HIGHDMA;
		if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) {
			dev_printk(KERN_INFO, &pci_dev->dev,
				"64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
		}
	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
		/* packet format 2: supports jumbo frames */
		np->desc_ver = DESC_VER_2;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
	} else {
		/* original packet format */
		np->desc_ver = DESC_VER_1;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
	}

	np->pkt_limit = NV_PKTLIMIT_1;
	if (id->driver_data & DEV_HAS_LARGEDESC)
		np->pkt_limit = NV_PKTLIMIT_2;

	if (id->driver_data & DEV_HAS_CHECKSUM) {
		np->rx_csum = 1;
		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
		dev->features |= NETIF_F_TSO;
	}

	np->vlanctl_bits = 0;
	if (id->driver_data & DEV_HAS_VLAN) {
		np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
		dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
		dev->vlan_rx_register = nv_vlan_rx_register;
	}

	np->msi_flags = 0;
	if ((id->driver_data & DEV_HAS_MSI) && msi) {
		np->msi_flags |= NV_MSI_CAPABLE;
	}
	if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
		np->msi_flags |= NV_MSI_X_CAPABLE;
	}

	np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
	if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
	}

	err = -ENOMEM;
	np->base = ioremap(addr, np->register_size);
	if (!np->base)
		goto out_relreg;
	dev->base_addr = (unsigned long)np->base;

	dev->irq = pci_dev->irq;

	np->rx_ring_size = RX_RING_DEFAULT;
	np->tx_ring_size = TX_RING_DEFAULT;

	if (!nv_optimized(np)) {
		np->rx_ring.orig = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.orig)
			goto out_unmap;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.ex)
			goto out_unmap;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
	np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
	np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
	if (!np->rx_skb || !np->tx_skb)
		goto out_freering;

	dev->open = nv_open;
	dev->stop = nv_close;

	if (!nv_optimized(np))
		dev->hard_start_xmit = nv_start_xmit;
	else
		dev->hard_start_xmit = nv_start_xmit_optimized;
	dev->get_stats = nv_get_stats;
	dev->change_mtu = nv_change_mtu;
	dev->set_mac_address = nv_set_mac_address;
	dev->set_multicast_list = nv_set_multicast;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = nv_poll_controller;
#endif
#ifdef CONFIG_FORCEDETH_NAPI
	netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
#endif
	SET_ETHTOOL_OPS(dev, &ops);
	dev->tx_timeout = nv_tx_timeout;
	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;

	pci_set_drvdata(pci_dev, dev);

	/* read the mac address */
	base = get_hwbase(dev);
	np->orig_mac[0] = readl(base + NvRegMacAddrA);
	np->orig_mac[1] = readl(base + NvRegMacAddrB);

	/* check the workaround bit for correct mac address order */
	txreg = readl(base + NvRegTransmitPoll);
	if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
		/* mac address is already in correct order */
		dev->dev_addr[0] = (np->orig_mac[0] >>  0) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[0] >>  8) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[1] >>  0) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[1] >>  8) & 0xff;
	} else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
		/* mac address is already in correct order */
		dev->dev_addr[0] = (np->orig_mac[0] >>  0) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[0] >>  8) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[1] >>  0) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[1] >>  8) & 0xff;
		/*
		 * Set orig mac address back to the reversed version.
		 * This flag will be cleared during low power transition.
		 * Therefore, we should always put back the reversed address.
		 */
		np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
			(dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24);
		np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
	} else {
		/* need to reverse mac address to correct order */
		dev->dev_addr[0] = (np->orig_mac[1] >>  8) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[1] >>  0) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[0] >>  8) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[0] >>  0) & 0xff;
		writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
	}

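	/*
	 * Worked example (illustration only): a NIC with the reversed layout
	 * and orig_mac[0] = 0x44332211, orig_mac[1] = 0x00006655 yields the
	 * address 66:55:44:33:22:11; NVREG_TRANSMITPOLL_MAC_ADDR_REV is then
	 * set so a later probe (and nv_remove) knows which layout the
	 * registers hold.
	 */
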
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	if (!is_valid_ether_addr(dev->perm_addr)) {
		/*
		 * Bad mac address. At least one bios sets the mac address
		 * to 01:23:45:67:89:ab
		 */
		dev_printk(KERN_ERR, &pci_dev->dev,
			"Invalid Mac address detected: %s\n",
			print_mac(mac, dev->dev_addr));
		dev_printk(KERN_ERR, &pci_dev->dev,
			"Please complain to your hardware vendor. Switching to a random MAC.\n");
		dev->dev_addr[0] = 0x00;
		dev->dev_addr[1] = 0x00;
		dev->dev_addr[2] = 0x6c;
		get_random_bytes(&dev->dev_addr[3], 3);
	}

	dprintk(KERN_DEBUG "%s: MAC Address %s\n",
		pci_name(pci_dev), print_mac(mac, dev->dev_addr));

	/* set mac address */
	nv_copy_mac_to_hw(dev);

	/* Workaround current PCI init glitch: wakeup bits aren't
	 * being set from PCI PM capability.
	 */
	device_init_wakeup(&pci_dev->dev, 1);

	/* disable WOL */
	writel(0, base + NvRegWakeUpFlags);
	np->wolenabled = 0;

	if (id->driver_data & DEV_HAS_POWER_CNTRL) {

		/* take phy and nic out of low power mode */
		powerstate = readl(base + NvRegPowerState2);
		powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
		if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
		    pci_dev->revision >= 0xA3)
			powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
		writel(powerstate, base + NvRegPowerState2);
	}

	if (np->desc_ver == DESC_VER_1) {
		np->tx_flags = NV_TX_VALID;
	} else {
		np->tx_flags = NV_TX2_VALID;
	}
	if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0003;
	} else {
		np->irqmask = NVREG_IRQMASK_CPU;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0001;
	}

	if (id->driver_data & DEV_NEED_TIMERIRQ)
		np->irqmask |= NVREG_IRQ_TIMER;
	if (id->driver_data & DEV_NEED_LINKTIMER) {
		dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
		np->need_linktimer = 1;
		np->link_timeout = jiffies + LINK_TIMEOUT;
	} else {
		dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
		np->need_linktimer = 0;
	}

	/* Limit the number of tx's outstanding for hw bug */
	if (id->driver_data & DEV_NEED_TX_LIMIT) {
		np->tx_limit = 1;
		if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_39) &&
		    pci_dev->revision >= 0xA2)
			np->tx_limit = 0;
	}

	/* clear phy state and temporarily halt phy interrupts */
	writel(0, base + NvRegMIIMask);
	phystate = readl(base + NvRegAdapterControl);
	if (phystate & NVREG_ADAPTCTL_RUNNING) {
		phystate_orig = 1;
		phystate &= ~NVREG_ADAPTCTL_RUNNING;
		writel(phystate, base + NvRegAdapterControl);
	}
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);

	if (id->driver_data & DEV_HAS_MGMT_UNIT) {
		/* management unit running on the mac? */
		if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) {
			np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
			dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use);
			if (nv_mgmt_acquire_sema(dev)) {
				/* management unit setup the phy already? */
				if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
				    NVREG_XMITCTL_SYNC_PHY_INIT) {
					/* phy is inited by mgmt unit */
					phyinitialized = 1;
					dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev));
				} else {
					/* we need to init the phy */
				}
			}
		}
	}

	/* find a suitable phy */
	for (i = 1; i <= 32; i++) {
		int id1, id2;
		int phyaddr = i & 0x1F;

		spin_lock_irq(&np->lock);
		id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id1 < 0 || id1 == 0xffff)
			continue;
		spin_lock_irq(&np->lock);
		id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id2 < 0 || id2 == 0xffff)
			continue;

		np->phy_model = id2 & PHYID2_MODEL_MASK;
		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
		dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
			pci_name(pci_dev), id1, id2, phyaddr);
		np->phyaddr = phyaddr;
		np->phy_oui = id1 | id2;

		/* Realtek hardcoded phy id1 to all zero's on certain phys */
		if (np->phy_oui == PHY_OUI_REALTEK2)
			np->phy_oui = PHY_OUI_REALTEK;
		/* Setup phy revision for Realtek */
		if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211)
			np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK;

		break;
	}
	if (i == 33) {
		dev_printk(KERN_INFO, &pci_dev->dev,
			   "open: Could not find a valid PHY.\n");
		goto out_error;
	}

	if (!phyinitialized) {
		/* reset it */
		phy_init(dev);
	} else {
		/* see if it is a gigabit phy */
		u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
		if (mii_status & PHY_GIGABIT) {
			np->gigabit = PHY_GIGABIT;
		}
	}

	/* set default link speed settings */
	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
	np->duplex = 0;
	np->autoneg = 1;

	err = register_netdev(dev);
	if (err) {
		dev_printk(KERN_INFO, &pci_dev->dev,
			   "unable to register netdev: %d\n", err);
		goto out_error;
	}

	dev_printk(KERN_INFO, &pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, "
		   "addr %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
		   dev->name,
		   np->phy_oui,
		   np->phyaddr,
		   dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
		   dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);

	dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
		   dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
		   dev->features & (NETIF_F_HW_CSUM | NETIF_F_SG) ?
			"csum " : "",
		   dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
			"vlan " : "",
		   id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
		   id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
		   id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
		   np->gigabit == PHY_GIGABIT ? "gbit " : "",
		   np->need_linktimer ? "lnktim " : "",
		   np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
		   np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
		   np->desc_ver);

	return 0;

out_error:
	if (phystate_orig)
		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
	pci_set_drvdata(pci_dev, NULL);
out_freering:
	free_rings(dev);
out_unmap:
	iounmap(get_hwbase(dev));
out_relreg:
	pci_release_regions(pci_dev);
out_disable:
	pci_disable_device(pci_dev);
out_free:
	free_netdev(dev);
out:
	return err;
}

static void nv_restore_phy(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u16 phy_reserved, mii_control;

	if (np->phy_oui == PHY_OUI_REALTEK &&
	    np->phy_model == PHY_MODEL_REALTEK_8201 &&
	    phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3);
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
		phy_reserved |= PHY_REALTEK_INIT8;
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved);
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1);

		/* restart auto negotiation */
		mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
		mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
	}
}
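/*
 * nv_restore_phy undoes the crossover-detection tweak applied at probe time
 * when the phy_cross module parameter disabled it, presumably so that a
 * later driver load (or another OS) sees the Realtek 8201 in its default
 * state; restarting autonegotiation makes the restored setting take effect
 * immediately.
 */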
static void __devexit nv_remove(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	unregister_netdev(dev);

	/* special op: write back the misordered MAC address - otherwise
	 * the next nv_probe would see a wrong address.
	 */
	writel(np->orig_mac[0], base + NvRegMacAddrA);
	writel(np->orig_mac[1], base + NvRegMacAddrB);
	writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
	       base + NvRegTransmitPoll);

	/* restore any phy related changes */
	nv_restore_phy(dev);

	/* free all structures */
	free_rings(dev);
	iounmap(get_hwbase(dev));
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
	free_netdev(dev);
	pci_set_drvdata(pci_dev, NULL);
}
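/*
 * Ordering matters in nv_remove: the MAC address restore and
 * nv_restore_phy run after unregister_netdev() but before the BAR is
 * unmapped and the PCI device disabled, while register and MII access
 * are still valid.
 */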
#ifdef CONFIG_PM
static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int i;

	if (netif_running(dev)) {
		nv_close(dev);
	}
	netif_device_detach(dev);

	/* save non-pci configuration space */
	for (i = 0; i <= np->register_size/sizeof(u32); i++)
		np->saved_config_space[i] = readl(base + i*sizeof(u32));

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
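/*
 * The loop above copies the whole MMIO register window into
 * np->saved_config_space, on the assumption that the MAC loses this
 * state in low power states; pci_save_state() only covers PCI
 * configuration space proper.
 */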
static int nv_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int rc = 0;
	int i;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, PCI_D0, 0);

	/* restore non-pci configuration space */
	for (i = 0; i <= np->register_size/sizeof(u32); i++)
		writel(np->saved_config_space[i], base + i*sizeof(u32));

	netif_device_attach(dev);
	if (netif_running(dev)) {
		rc = nv_open(dev);
		nv_set_multicast(dev);
	}
	return rc;
}
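/*
 * Resume mirrors suspend in reverse: power state first, then PCI config
 * space, then the saved MMIO register window, and only then is the
 * netdev reattached and (if it was running) reopened.
 */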
static void nv_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);

	if (netif_running(dev))
		nv_close(dev);

	pci_enable_wake(pdev, PCI_D3hot, np->wolenabled);
	pci_enable_wake(pdev, PCI_D3cold, np->wolenabled);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
}
#else
#define nv_suspend NULL
#define nv_shutdown NULL
#define nv_resume NULL
#endif /* CONFIG_PM */
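
/*
 * PCI device table: each entry ORs together the DEV_* capability and
 * workaround flags that nv_probe consults for that chipset; the table
 * is terminated by an all-zero entry.
 */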
static struct pci_device_id pci_tbl[] = {
	{	/* nForce Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce2 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{0,},
};
static struct pci_driver driver = {
	.name		= DRV_NAME,
	.id_table	= pci_tbl,
	.probe		= nv_probe,
	.remove		= __devexit_p(nv_remove),
	.suspend	= nv_suspend,
	.resume		= nv_resume,
	.shutdown	= nv_shutdown,
};
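/*
 * The PCI core matches devices against pci_tbl (exported below via
 * MODULE_DEVICE_TABLE) and calls nv_probe for each match; .suspend,
 * .resume and .shutdown compile to NULL when CONFIG_PM is not set.
 */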
static int __init init_nic(void)
{
	return pci_register_driver(&driver);
}

static void __exit exit_nic(void)
{
	pci_unregister_driver(&driver);
}
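/*
 * pci_register_driver() returns 0 or a negative errno; returning its
 * result from init_nic means a registration failure also fails the
 * module load.
 */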
module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "Interval determines how frequently the timer interrupt is generated, as (time_in_micro_secs * 100) / (2^10). Min is 0 and max is 65535.");
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(msix, int, 0);
MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_cross, int, 0);
MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
6154 MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
6155 MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
6156 MODULE_LICENSE("GPL");
6158 MODULE_DEVICE_TABLE(pci
, pci_tbl
);
6160 module_init(init_nic
);
6161 module_exit(exit_nic
);