/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *		IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,2005,2006,2007,2008 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#define FORCEDETH_VERSION		"0.61"
#define DRV_NAME			"forcedeth"
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#if 0
#define dprintk			printk
#else
#define dprintk(x...)		do { } while (0)
#endif

#define TX_WORK_PER_LOOP	64
#define RX_WORK_PER_LOOP	64
#define DEV_NEED_TIMERIRQ	0x000001  /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER	0x000002  /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC	0x000004  /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA	0x000008  /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM	0x000010  /* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN		0x000020  /* device supports vlan tagging and striping */
#define DEV_HAS_MSI		0x000040  /* device supports MSI */
#define DEV_HAS_MSI_X		0x000080  /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL	0x000100  /* device supports power savings */
#define DEV_HAS_STATISTICS_V1	0x000200  /* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2	0x000400  /* device supports hw statistics version 2 */
#define DEV_HAS_STATISTICS_V3	0x000800  /* device supports hw statistics version 3 */
#define DEV_HAS_TEST_EXTENDED	0x001000  /* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT	0x002000  /* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR	0x004000  /* device supports correct mac address order */
#define DEV_HAS_COLLISION_FIX	0x008000  /* device supports tx collision fix */
#define DEV_HAS_PAUSEFRAME_TX_V1	0x010000  /* device supports tx pause frames version 1 */
#define DEV_HAS_PAUSEFRAME_TX_V2	0x020000  /* device supports tx pause frames version 2 */
#define DEV_HAS_PAUSEFRAME_TX_V3	0x040000  /* device supports tx pause frames version 3 */
#define DEV_NEED_TX_LIMIT	0x080000  /* device needs to limit tx */
#define DEV_HAS_GEAR_MODE	0x100000  /* device supports gear mode */
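/*
 * Illustrative sketch (not in the original source): the capability flags
 * above are ORed together per board in the PCI device table's driver_data
 * field, e.g. a hypothetical entry for a chip with checksum offload and the
 * timer-irq workaround mentioned in the header comment:
 *
 *	.driver_data = DEV_NEED_TIMERIRQ | DEV_NEED_LINKTIMER | DEV_HAS_CHECKSUM,
 *
 * The probe path can then test individual capabilities with a bitwise AND,
 * e.g. (id->driver_data & DEV_HAS_CHECKSUM).
 */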
enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT	0x040
#define NVREG_IRQSTAT_MASK	0x81ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR	0x0001
#define NVREG_IRQ_RX		0x0002
#define NVREG_IRQ_RX_NOBUF	0x0004
#define NVREG_IRQ_TX_ERR	0x0008
#define NVREG_IRQ_TX_OK		0x0010
#define NVREG_IRQ_TIMER		0x0020
#define NVREG_IRQ_LINK		0x0040
#define NVREG_IRQ_RX_FORCED	0x0080
#define NVREG_IRQ_TX_FORCED	0x0100
#define NVREG_IRQ_RECOVER_ERROR	0x8000
#define NVREG_IRQMASK_THROUGHPUT	0x00df
#define NVREG_IRQMASK_CPU		0x0060
#define NVREG_IRQ_TX_ALL	(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL	(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER		(NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)

#define NVREG_IRQ_UNKNOWN	(~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
				   NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
				   NVREG_IRQ_TX_FORCED|NVREG_IRQ_RECOVER_ERROR))

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL	3
/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT	970 /* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU	13
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED	0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX	0x01
#define NVREG_MISC1_HD		0x02
#define NVREG_MISC1_FORCE	0x3b0f3c
	NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT	0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START	0x01
#define NVREG_XMITCTL_MGMT_ST	0x40000000
#define NVREG_XMITCTL_SYNC_MASK		0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY	0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT	0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK	0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE	0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK	0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ	0x0000f000
#define NVREG_XMITCTL_HOST_LOADED	0x00004000
#define NVREG_XMITCTL_TX_PATH_EN	0x01000000
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY	0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX	0x08
#define NVREG_PFF_ALWAYS	0x7F0000
#define NVREG_PFF_PROMISC	0x80
#define NVREG_PFF_MYADDR	0x20
#define NVREG_PFF_LOOPBACK	0x10

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY	0x601
#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START	0x01
#define NVREG_RCVCTL_RX_PATH_EN	0x01000000
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY	0x01

	NvRegSlotTime = 0x9c,
#define NVREG_SLOTTIME_LEGBF_ENABLED	0x80000000
#define NVREG_SLOTTIME_10_100_FULL	0x00007f00
#define NVREG_SLOTTIME_1000_FULL	0x0003ff00
#define NVREG_SLOTTIME_HALF		0x0000ff00
#define NVREG_SLOTTIME_DEFAULT		0x00007f00
#define NVREG_SLOTTIME_MASK		0x000000ff

	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT		0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100		0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000		0x14050f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10	0x16190f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100	0x16300f
#define NVREG_TX_DEFERRAL_MII_STRETCH		0x152000
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT	0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE	0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
#define NVREG_MCASTMASKA_NONE	0xffffffff
	NvRegMulticastMaskB = 0xBC,
#define NVREG_MCASTMASKB_NONE	0xffff

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII		0x10000000
	NvRegBackOffControl = 0xC4,
#define NVREG_BKOFFCTRL_DEFAULT		0x70000000
#define NVREG_BKOFFCTRL_SEED_MASK	0x000003ff
#define NVREG_BKOFFCTRL_SELECT		24
#define NVREG_BKOFFCTRL_GEAR		12
	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT	0
#define NVREG_RINGSZ_RXSHIFT	16
	NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV	0x00008000
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE	0x10000
#define NVREG_LINKSPEED_10	1000
#define NVREG_LINKSPEED_100	100
#define NVREG_LINKSPEED_1000	50
#define NVREG_LINKSPEED_MASK	(0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31	(1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT	0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT	0x1e08000
#define NVREG_TX_WM_DESC2_3_1000	0xfe08000
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK	0x0001
#define NVREG_TXRXCTL_BIT1	0x0002
#define NVREG_TXRXCTL_BIT2	0x0004
#define NVREG_TXRXCTL_IDLE	0x0008
#define NVREG_TXRXCTL_RESET	0x0010
#define NVREG_TXRXCTL_RXCHECK	0x0400
#define NVREG_TXRXCTL_DESC_1	0
#define NVREG_TXRXCTL_DESC_2	0x002100
#define NVREG_TXRXCTL_DESC_3	0xc02200
#define NVREG_TXRXCTL_VLANSTRIP	0x00040
#define NVREG_TXRXCTL_VLANINS	0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE	0x0fff0080
#define NVREG_TX_PAUSEFRAME_ENABLE_V1	0x01800010
#define NVREG_TX_PAUSEFRAME_ENABLE_V2	0x056003f0
#define NVREG_TX_PAUSEFRAME_ENABLE_V3	0x09f00880
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR		0x0001
#define NVREG_MIISTAT_LINKCHANGE	0x0008
#define NVREG_MIISTAT_MASK_RW		0x0007
#define NVREG_MIISTAT_MASK_ALL		0x000f
	NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE		0x0008

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START	0x02
#define NVREG_ADAPTCTL_LINKUP	0x04
#define NVREG_ADAPTCTL_PHYVALID	0x40000
#define NVREG_ADAPTCTL_RUNNING	0x100000
#define NVREG_ADAPTCTL_PHYSHIFT	24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8	(1<<8)
#define NVREG_MIIDELAY	5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE	0x08000
#define NVREG_MIICTL_WRITE	0x00400
#define NVREG_MIICTL_ADDRSHIFT	5
	NvRegMIIData = 0x194,
	NvRegTxUnicast = 0x1a0,
	NvRegTxMulticast = 0x1a4,
	NvRegTxBroadcast = 0x1a8,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL		0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
#define NVREG_WAKEUPFLAGS_D3SHIFT	12
#define NVREG_WAKEUPFLAGS_D2SHIFT	8
#define NVREG_WAKEUPFLAGS_D1SHIFT	4
#define NVREG_WAKEUPFLAGS_D0SHIFT	0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
#define NVREG_WAKEUPFLAGS_ENABLE	0x1111

	NvRegPatternCRC = 0x204,
	NvRegPatternMask = 0x208,
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP	(1<<30)
#define NVREG_POWERCAP_D2SUPP	(1<<26)
#define NVREG_POWERCAP_D1SUPP	(1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP	0x8000
#define NVREG_POWERSTATE_VALID		0x0100
#define NVREG_POWERSTATE_MASK		0x0003
#define NVREG_POWERSTATE_D0		0x0000
#define NVREG_POWERSTATE_D1		0x0001
#define NVREG_POWERSTATE_D2		0x0002
#define NVREG_POWERSTATE_D3		0x0003
	NvRegTxCnt = 0x280,
	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxRunt = 0x2b0,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxDef = 0x2d4,
	NvRegTxFrame = 0x2d8,
	NvRegRxCnt = 0x2dc,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE	0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,

	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F11
#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
#define NVREG_POWERSTATE2_PHY_RESET		0x0004
};
/* Big endian: should work, but is untested */
struct ring_desc {
	__le32 buf;
	__le32 flaglen;
};

struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

union ring_type {
	struct ring_desc *orig;
	struct ring_desc_ex *ex;
};
#define FLAG_MASK_V1	0xffff0000
#define FLAG_MASK_V2	0xffffc000
#define LEN_MASK_V1	(0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2	(0xffffffff ^ FLAG_MASK_V2)

#define NV_TX_LASTPACKET	(1<<16)
#define NV_TX_RETRYERROR	(1<<19)
#define NV_TX_RETRYCOUNT_MASK	(0xF<<20)
#define NV_TX_FORCED_INTERRUPT	(1<<24)
#define NV_TX_DEFERRED		(1<<26)
#define NV_TX_CARRIERLOST	(1<<27)
#define NV_TX_LATECOLLISION	(1<<28)
#define NV_TX_UNDERFLOW		(1<<29)
#define NV_TX_ERROR		(1<<30)
#define NV_TX_VALID		(1<<31)

#define NV_TX2_LASTPACKET	(1<<29)
#define NV_TX2_RETRYERROR	(1<<18)
#define NV_TX2_RETRYCOUNT_MASK	(0xF<<19)
#define NV_TX2_FORCED_INTERRUPT	(1<<30)
#define NV_TX2_DEFERRED		(1<<25)
#define NV_TX2_CARRIERLOST	(1<<26)
#define NV_TX2_LATECOLLISION	(1<<27)
#define NV_TX2_UNDERFLOW	(1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR		(1<<30)
#define NV_TX2_VALID		(1<<31)
#define NV_TX2_TSO		(1<<28)
#define NV_TX2_TSO_SHIFT	14
#define NV_TX2_TSO_MAX_SHIFT	14
#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3	(1<<27)
#define NV_TX2_CHECKSUM_L4	(1<<26)

#define NV_TX3_VLAN_TAG_PRESENT	(1<<18)

#define NV_RX_DESCRIPTORVALID	(1<<16)
#define NV_RX_MISSEDFRAME	(1<<17)
#define NV_RX_SUBSTRACT1	(1<<18)
#define NV_RX_ERROR1		(1<<23)
#define NV_RX_ERROR2		(1<<24)
#define NV_RX_ERROR3		(1<<25)
#define NV_RX_ERROR4		(1<<26)
#define NV_RX_CRCERR		(1<<27)
#define NV_RX_OVERFLOW		(1<<28)
#define NV_RX_FRAMINGERR	(1<<29)
#define NV_RX_ERROR		(1<<30)
#define NV_RX_AVAIL		(1<<31)
#define NV_RX_ERROR_MASK	(NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)

#define NV_RX2_CHECKSUMMASK	(0x1C000000)
#define NV_RX2_CHECKSUM_IP	(0x10000000)
#define NV_RX2_CHECKSUM_IP_TCP	(0x14000000)
#define NV_RX2_CHECKSUM_IP_UDP	(0x18000000)
#define NV_RX2_DESCRIPTORVALID	(1<<29)
#define NV_RX2_SUBSTRACT1	(1<<25)
#define NV_RX2_ERROR1		(1<<18)
#define NV_RX2_ERROR2		(1<<19)
#define NV_RX2_ERROR3		(1<<20)
#define NV_RX2_ERROR4		(1<<21)
#define NV_RX2_CRCERR		(1<<22)
#define NV_RX2_OVERFLOW		(1<<23)
#define NV_RX2_FRAMINGERR	(1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR		(1<<30)
#define NV_RX2_AVAIL		(1<<31)
#define NV_RX2_ERROR_MASK	(NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)

#define NV_RX3_VLAN_TAG_PRESENT	(1<<16)
#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)
/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1	0x270
#define NV_PCI_REGSZ_VER2	0x2d4
#define NV_PCI_REGSZ_VER3	0x604
#define NV_PCI_REGSZ_MAX	0x604
/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY	4
#define NV_TXSTOP_DELAY1	10
#define NV_TXSTOP_DELAY1MAX	500000
#define NV_TXSTOP_DELAY2	100
#define NV_RXSTOP_DELAY1	10
#define NV_RXSTOP_DELAY1MAX	500000
#define NV_RXSTOP_DELAY2	100
#define NV_SETUP5_DELAY		5
#define NV_SETUP5_DELAYMAX	50000
#define NV_POWERUP_DELAY	5
#define NV_POWERUP_DELAYMAX	5000
#define NV_MIIBUSY_DELAY	50
#define NV_MIIPHY_DELAY		10
#define NV_MIIPHY_DELAYMAX	10000
#define NV_MAC_RESET_DELAY	64

#define NV_WAKEUPPATTERNS	5
#define NV_WAKEUPMASKENTRIES	4
/* General driver defaults */
#define NV_WATCHDOG_TIMEO	(5*HZ)

#define RX_RING_DEFAULT		128
#define TX_RING_DEFAULT		256
#define RX_RING_MIN		128
#define TX_RING_MIN		64
#define RING_MAX_DESC_VER_1	1024
#define RING_MAX_DESC_VER_2_3	16384

/* rx/tx mac addr + type + vlan + align + slack*/
#define NV_RX_HEADERS		(64)
/* even more slack. */
#define NV_RX_ALLOC_PAD		(64)

/* maximum mtu size */
#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
#define NV_PKTLIMIT_2	9100	/* Actual limit according to NVidia: 9202 */

#define OOM_REFILL	(1+HZ/20)
#define POLL_WAIT	(1+HZ/100)
#define LINK_TIMEOUT	(3*HZ)
#define STATS_INTERVAL	(10*HZ)
/*
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1	0x0
#define DESC_VER_2	0x02100
#define DESC_VER_3	0x02200
#define PHY_OUI_MARVELL		0x5043
#define PHY_OUI_CICADA		0x03f1
#define PHY_OUI_VITESSE		0x01c1
#define PHY_OUI_REALTEK		0x0732
#define PHY_OUI_REALTEK2	0x0020
#define PHYID1_OUI_MASK		0x03ff
#define PHYID1_OUI_SHFT		6
#define PHYID2_OUI_MASK		0xfc00
#define PHYID2_OUI_SHFT		10
#define PHYID2_MODEL_MASK	0x03f0
#define PHY_MODEL_REALTEK_8211	0x0110
#define PHY_REV_MASK		0x0001
#define PHY_REV_REALTEK_8211B	0x0000
#define PHY_REV_REALTEK_8211C	0x0001
#define PHY_MODEL_REALTEK_8201	0x0200
#define PHY_MODEL_MARVELL_E3016	0x0220
#define PHY_MARVELL_E3016_INITMASK	0x0300
#define PHY_CICADA_INIT1	0x0f000
#define PHY_CICADA_INIT2	0x0e00
#define PHY_CICADA_INIT3	0x01000
#define PHY_CICADA_INIT4	0x0200
#define PHY_CICADA_INIT5	0x0004
#define PHY_CICADA_INIT6	0x02000
#define PHY_VITESSE_INIT_REG1	0x1f
#define PHY_VITESSE_INIT_REG2	0x10
#define PHY_VITESSE_INIT_REG3	0x11
#define PHY_VITESSE_INIT_REG4	0x12
#define PHY_VITESSE_INIT_MSK1	0xc
#define PHY_VITESSE_INIT_MSK2	0x0180
#define PHY_VITESSE_INIT1	0x52b5
#define PHY_VITESSE_INIT2	0xaf8a
#define PHY_VITESSE_INIT3	0x8
#define PHY_VITESSE_INIT4	0x8f8a
#define PHY_VITESSE_INIT5	0xaf86
#define PHY_VITESSE_INIT6	0x8f86
#define PHY_VITESSE_INIT7	0xaf82
#define PHY_VITESSE_INIT8	0x0100
#define PHY_VITESSE_INIT9	0x8f82
#define PHY_VITESSE_INIT10	0x0
#define PHY_REALTEK_INIT_REG1	0x1f
#define PHY_REALTEK_INIT_REG2	0x19
#define PHY_REALTEK_INIT_REG3	0x13
#define PHY_REALTEK_INIT_REG4	0x14
#define PHY_REALTEK_INIT_REG5	0x18
#define PHY_REALTEK_INIT_REG6	0x11
#define PHY_REALTEK_INIT_REG7	0x01
#define PHY_REALTEK_INIT1	0x0000
#define PHY_REALTEK_INIT2	0x8e00
#define PHY_REALTEK_INIT3	0x0001
#define PHY_REALTEK_INIT4	0xad17
#define PHY_REALTEK_INIT5	0xfb54
#define PHY_REALTEK_INIT6	0xf5c7
#define PHY_REALTEK_INIT7	0x1000
#define PHY_REALTEK_INIT8	0x0003
#define PHY_REALTEK_INIT9	0x0008
#define PHY_REALTEK_INIT10	0x0005
#define PHY_REALTEK_INIT11	0x0200
#define PHY_REALTEK_INIT_MSK1	0x0003

#define PHY_GIGABIT	0x0100

#define PHY_TIMEOUT	0x1
#define PHY_ERROR	0x2
#define PHY_HALF	0x100

#define NV_PAUSEFRAME_RX_CAPABLE	0x0001
#define NV_PAUSEFRAME_TX_CAPABLE	0x0002
#define NV_PAUSEFRAME_RX_ENABLE		0x0004
#define NV_PAUSEFRAME_TX_ENABLE		0x0008
#define NV_PAUSEFRAME_RX_REQ		0x0010
#define NV_PAUSEFRAME_TX_REQ		0x0020
#define NV_PAUSEFRAME_AUTONEG		0x0040

/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS	8
#define NV_MSI_X_VECTORS_MASK	0x000f
#define NV_MSI_CAPABLE		0x0010
#define NV_MSI_X_CAPABLE	0x0020
#define NV_MSI_ENABLED		0x0040
#define NV_MSI_X_ENABLED	0x0080

#define NV_MSI_X_VECTOR_ALL	0x0
#define NV_MSI_X_VECTOR_RX	0x0
#define NV_MSI_X_VECTOR_TX	0x1
#define NV_MSI_X_VECTOR_OTHER	0x2

#define NV_RESTART_TX	0x1
#define NV_RESTART_RX	0x2

#define NV_TX_LIMIT_COUNT	16
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};

static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "rx_frame_error" },
	{ "rx_late_collision" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_errors_total" },
	{ "tx_errors_total" },
	/* version 2 stats */
	/* version 3 stats */
};

struct nv_ethtool_stats {
	u64 tx_late_collision;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 rx_late_collision;
	u64 rx_frame_too_long;
	u64 rx_frame_align_error;
	/* version 2 stats */
	/* version 3 stats */
};

#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
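/*
 * Worked example (not in the original source): the V3 count is derived from
 * the struct layout, so if nv_ethtool_stats contained, say, 39 u64 fields,
 * NV_DEV_STATISTICS_V3_COUNT would evaluate to 39. V2 hardware then exposes
 * 39 - 3 = 36 counters (no tx_unicast/tx_multicast/tx_broadcast, which are
 * the three read only in the DEV_HAS_STATISTICS_V3 branch below), and V1
 * hardware exposes 36 - 6 = 30, additionally dropping the six version-2
 * counters (tx_deferral, tx_packets, rx_bytes, tx_pause, rx_pause,
 * rx_drop_frame).
 */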
#define NV_TEST_COUNT_BASE	3
#define NV_TEST_COUNT_EXTENDED	4

static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link (online/offline)" },
	{ "register (offline) " },
	{ "interrupt (offline) " },
	{ "loopback (offline) " }
};

struct register_test {
	__u32 reg;
	__u32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
};
struct nv_skb_map {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int dma_len;
	struct ring_desc_ex *first_tx_desc;
	struct nv_skb_map *next_tx_ctx;
};
/*
 * SMP locking:
 * All hardware access under dev->priv->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *	by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *	needs dev->priv->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 */
/* in dev: base, irq */
struct fe_priv {

	struct net_device *dev;
	struct napi_struct napi;

	/* General data:
	 * Locking: spin_lock(&np->lock); */
	struct nv_ethtool_stats estats;

	unsigned int phy_oui;
	unsigned int phy_model;
	unsigned int phy_rev;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;

	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type get_rx, put_rx, first_rx, last_rx;
	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
	struct nv_skb_map *rx_skb;

	union ring_type rx_ring;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	union ring_type get_tx, put_tx, first_tx, last_tx;
	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
	struct nv_skb_map *tx_skb;

	union ring_type tx_ring;
	u32 tx_pkts_in_progress;
	struct nv_skb_map *tx_change_owner;
	struct nv_skb_map *tx_end_flip;

	struct vlan_group *vlangrp;

	/* msi/msi-x fields */
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* power saved state */
	u32 saved_config_space[NV_PCI_REGSZ_MAX/4];
};
/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 5;
/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU
};
static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * The value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;
enum {
	NV_MSI_INT_DISABLED,
	NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;

enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_DISABLED;

enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;

/*
 * Crossover Detection
 * Realtek 8201 phy + some OEM boards do not work properly.
 */
enum {
	NV_CROSSOVER_DETECTION_DISABLED,
	NV_CROSSOVER_DETECTION_ENABLED
};
static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;
static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}
static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}

static bool nv_optimized(struct fe_priv *np)
{
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		return false;
	return true;
}
static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
		     int delay, int delaymax, const char *msg)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0) {
			if (msg)
				printk(msg);
			return 1;
		}
	} while ((readl(base + offset) & mask) != target);
	return 0;
}
#define NV_SETUP_RX_RING	0x01
#define NV_SETUP_TX_RING	0x02

static inline u32 dma_low(dma_addr_t addr)
{
	return addr;
}

static inline u32 dma_high(dma_addr_t addr)
{
	return addr>>31>>1;	/* 0 if 32bit, shift down by 32 if 64bit */
}
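/*
 * Illustrative note (not in the original source): the double shift avoids
 * undefined behaviour when dma_addr_t is only 32 bits wide, where a single
 * "addr >> 32" would shift by the full width of the type. For example, with
 * a 64-bit addr = 0x0000000180001000:
 *	dma_low(addr)  == 0x80001000
 *	dma_high(addr) == 0x00000001
 * and with a 32-bit dma_addr_t, dma_high() always returns 0.
 */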
static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (!nv_optimized(np)) {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		}
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
		}
	}
}
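/*
 * Usage sketch (assumed, mirroring the flag definitions above): both rings
 * share one DMA allocation with the rx descriptors first, so a caller that
 * (re)starts the whole nic would program both base addresses in one call:
 *
 *	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
 */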
static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!nv_optimized(np)) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
	if (np->rx_skb)
		kfree(np->rx_skb);
	if (np->tx_skb)
		kfree(np->tx_skb);
}
static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}
static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}
static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}
/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}
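/*
 * Illustrative example (not in the original source) of the XOR behaviour
 * noted above: if the MSI-X irqmask currently has NVREG_IRQ_RX_ALL and
 * NVREG_IRQ_TX_ALL enabled, writing NVREG_IRQ_TX_ALL again toggles the TX
 * bits off while leaving the RX bits enabled. That is why
 * nv_disable_hw_interrupts() writes back the same mask that was previously
 * enabled in MSI-X mode, instead of simply writing 0 as it does otherwise.
 */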
#define MII_READ	(-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
		      NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
			dev->name, miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
			dev->name, value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
			dev->name, miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
			dev->name, miireg, addr, retval);
	}

	return retval;
}
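/*
 * Usage sketch (hypothetical helper, not part of the driver): reading the
 * two PHY identifier registers through mii_rw(). MII_PHYSID1/MII_PHYSID2
 * come from <linux/mii.h>; mii_rw() returns -1 on a timeout or MII error,
 * which this sketch treats as failure.
 */
static inline int nv_read_phy_id(struct net_device *dev, int phyaddr,
				 u32 *id1, u32 *id2)
{
	int lo = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
	int hi = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);

	if (lo == -1 || hi == -1)
		return -1;	/* MII access timed out or failed */
	*id1 = lo;
	*id2 = hi;
	return 0;
}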
static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = BMCR_RESET | bmcr_setup;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
		return -1;
	}

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		msleep(10);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}
static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;

	/* phy errata for E3016 phy */
	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		reg &= ~PHY_MARVELL_E3016_INITMASK;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
			printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211C) {
			u32 powerstate = readl(base + NvRegPowerState2);

			/* need to perform hw phy reset */
			powerstate |= NVREG_POWERSTATE2_PHY_RESET;
			writel(powerstate, base + NvRegPowerState2);
			msleep(25);

			powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
			writel(powerstate, base + NvRegPowerState2);
			msleep(25);

			reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
			reg |= PHY_REALTEK_INIT9;
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
			if (!(reg & PHY_REALTEK_INIT11)) {
				reg |= PHY_REALTEK_INIT11;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
		if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_39) {
				phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
				phy_reserved |= PHY_REALTEK_INIT7;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
		}
	}
	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}
	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= BMCR_ANENABLE;

	if (np->phy_oui == PHY_OUI_REALTEK &&
	    np->phy_model == PHY_MODEL_REALTEK_8211 &&
	    np->phy_rev == PHY_REV_REALTEK_8211C) {
		/* start autoneg since we already performed hw reset above */
		mii_control |= BMCR_ANRESTART;
		if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
			printk(KERN_INFO "%s: phy init failed\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else {
		/* reset the phy
		 * (certain phys need bmcr to be setup with reset)
		 */
		if (phy_reset(dev, mii_control)) {
			printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	/* phy vendor specific configuration */
	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII)) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_CICADA_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_CICADA) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
		phy_reserved |= PHY_CICADA_INIT6;
		if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_VITESSE) {
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
		phy_reserved |= PHY_VITESSE_INIT8;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			/* reset could have cleared these out, set them back */
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
		if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_39) {
				phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
				phy_reserved |= PHY_REALTEK_INIT7;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
			if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
				phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
				phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
				phy_reserved |= PHY_REALTEK_INIT3;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
		}
	}
	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);

	/* restart auto negotiation */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
		return PHY_ERROR;
	}

	return 0;
}
static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
	/* Already running? Stop it. */
	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
		rx_ctrl &= ~NVREG_RCVCTL_START;
		writel(rx_ctrl, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	rx_ctrl |= NVREG_RCVCTL_START;
	if (np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
		dev->name, np->duplex, np->linkspeed);
	pci_push(base);
}
static void nv_stop_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
	if (!np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_START;
	else
		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
		  NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
		  KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");

	udelay(NV_RXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(0, base + NvRegLinkSpeed);
}
static void nv_start_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
	tx_ctrl |= NVREG_XMITCTL_START;
	if (np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	pci_push(base);
}
static void nv_stop_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
	if (!np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_START;
	else
		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
		  NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
		  KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");

	udelay(NV_TXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
		       base + NvRegTransmitPoll);
}
static void nv_start_rxtx(struct net_device *dev)
{
	nv_start_rx(dev);
	nv_start_tx(dev);
}

static void nv_stop_rxtx(struct net_device *dev)
{
	nv_stop_rx(dev);
	nv_stop_tx(dev);
}
static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}
static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 temp1, temp2, temp3;

	dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);

	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);

	/* save registers since they will be cleared on reset */
	temp1 = readl(base + NvRegMacAddrA);
	temp2 = readl(base + NvRegMacAddrB);
	temp3 = readl(base + NvRegTransmitPoll);

	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);

	/* restore saved registers */
	writel(temp1, base + NvRegMacAddrA);
	writel(temp2, base + NvRegMacAddrB);
	writel(temp3, base + NvRegTransmitPoll);

	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}
static void nv_get_hw_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->estats.tx_bytes += readl(base + NvRegTxCnt);
	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
	np->estats.rx_runt += readl(base + NvRegRxRunt);
	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
	np->estats.rx_packets =
		np->estats.rx_unicast +
		np->estats.rx_multicast +
		np->estats.rx_broadcast;
	np->estats.rx_errors_total =
		np->estats.rx_crc_errors +
		np->estats.rx_over_errors +
		np->estats.rx_frame_error +
		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
		np->estats.rx_late_collision +
		np->estats.rx_runt +
		np->estats.rx_frame_too_long;
	np->estats.tx_errors_total =
		np->estats.tx_late_collision +
		np->estats.tx_fifo_errors +
		np->estats.tx_carrier_errors +
		np->estats.tx_excess_deferral +
		np->estats.tx_retry_error;

	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
		np->estats.tx_deferral += readl(base + NvRegTxDef);
		np->estats.tx_packets += readl(base + NvRegTxFrame);
		np->estats.rx_bytes += readl(base + NvRegRxCnt);
		np->estats.tx_pause += readl(base + NvRegTxPause);
		np->estats.rx_pause += readl(base + NvRegRxPause);
		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
	}

	if (np->driver_data & DEV_HAS_STATISTICS_V3) {
		np->estats.tx_unicast += readl(base + NvRegTxUnicast);
		np->estats.tx_multicast += readl(base + NvRegTxMulticast);
		np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
	}
}
/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	/* If the nic supports hw counters then retrieve latest values */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) {
		nv_get_hw_stats(dev);

		/* copy to net_device stats */
		dev->stats.tx_bytes = np->estats.tx_bytes;
		dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
		dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
		dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
		dev->stats.rx_over_errors = np->estats.rx_over_errors;
		dev->stats.rx_errors = np->estats.rx_errors_total;
		dev->stats.tx_errors = np->estats.tx_errors_total;
	}

	return &dev->stats;
}
/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without Available descriptors
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc *less_rx;

	less_rx = np->get_rx.orig;
	if (less_rx-- == np->first_rx.orig)
		less_rx = np->last_rx.orig;

	while (np->put_rx.orig != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
			wmb();
			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
				np->put_rx.orig = np->first_rx.orig;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}
static int nv_alloc_rx_optimized(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc_ex *less_rx;

	less_rx = np->get_rx.ex;
	if (less_rx-- == np->first_rx.ex)
		less_rx = np->last_rx.ex;

	while (np->put_rx.ex != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
			np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
			wmb();
			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
				np->put_rx.ex = np->first_rx.ex;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}
/* If rx bufs are exhausted, called after 50ms to attempt to refresh */
#ifdef CONFIG_FORCEDETH_NAPI
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	/* Just reschedule NAPI rx processing */
	netif_rx_schedule(dev, &np->napi);
}
#else
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	int retcode;

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
	if (!nv_optimized(np))
		retcode = nv_alloc_rx(dev);
	else
		retcode = nv_alloc_rx_optimized(dev);
	if (retcode) {
		spin_lock_irq(&np->lock);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock_irq(&np->lock);
	}
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
}
#endif
static void nv_init_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;

	if (!nv_optimized(np))
		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
	else
		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
	np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];

	for (i = 0; i < np->rx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		np->rx_skb[i].skb = NULL;
		np->rx_skb[i].dma = 0;
	}
}
static void nv_init_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_tx = np->put_tx = np->first_tx = np->tx_ring;

	if (!nv_optimized(np))
		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
	else
		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
	np->tx_pkts_in_progress = 0;
	np->tx_change_owner = NULL;
	np->tx_end_flip = NULL;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		np->tx_skb[i].skb = NULL;
		np->tx_skb[i].dma = 0;
		np->tx_skb[i].dma_len = 0;
		np->tx_skb[i].first_tx_desc = NULL;
		np->tx_skb[i].next_tx_ctx = NULL;
	}
}
static int nv_init_ring(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	nv_init_tx(dev);
	nv_init_rx(dev);

	if (!nv_optimized(np))
		return nv_alloc_rx(dev);
	else
		return nv_alloc_rx_optimized(dev);
}
static int nv_release_txskb(struct net_device *dev, struct nv_skb_map* tx_skb)
{
	struct fe_priv *np = netdev_priv(dev);

	if (tx_skb->dma) {
		pci_unmap_page(np->pci_dev, tx_skb->dma,
			       tx_skb->dma_len,
			       PCI_DMA_TODEVICE);
		tx_skb->dma = 0;
	}
	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
		return 1;
	} else {
		return 0;
	}
}
static void nv_drain_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int i;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		if (nv_release_txskb(dev, &np->tx_skb[i]))
			dev->stats.tx_dropped++;
		np->tx_skb[i].dma = 0;
		np->tx_skb[i].dma_len = 0;
		np->tx_skb[i].first_tx_desc = NULL;
		np->tx_skb[i].next_tx_ctx = NULL;
	}
	np->tx_pkts_in_progress = 0;
	np->tx_change_owner = NULL;
	np->tx_end_flip = NULL;
}
static void nv_drain_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	for (i = 0; i < np->rx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		wmb();
		if (np->rx_skb[i].skb) {
			pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
					 (skb_end_pointer(np->rx_skb[i].skb) -
					  np->rx_skb[i].skb->data),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skb[i].skb);
			np->rx_skb[i].skb = NULL;
		}
	}
}

static void nv_drain_rxtx(struct net_device *dev)
{
	nv_drain_tx(dev);
	nv_drain_rx(dev);
}
static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
{
	return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
}
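/*
 * Worked example for nv_get_empty_tx_slots(): with tx_ring_size = 256 and
 * put_tx_ctx 10 entries ahead of get_tx_ctx, the expression yields
 * 256 - ((256 + 10) % 256) = 246 free slots; with an empty ring
 * (put == get) it yields 256 - 0 = 256.
 */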
static void nv_legacybackoff_reseed(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	u32 low;
	int tx_status = 0;

	reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK;
	get_random_bytes(&low, sizeof(low));
	reg |= low & NVREG_SLOTTIME_MASK;

	/* Need to stop tx before change takes effect.
	 * Caller has already gained np->lock.
	 */
	tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START;
	if (tx_status)
		nv_stop_tx(dev);
	nv_stop_rx(dev);
	writel(reg, base + NvRegSlotTime);
	if (tx_status)
		nv_start_tx(dev);
	nv_start_rx(dev);
}
/* Gear Backoff Seeds */
#define BACKOFF_SEEDSET_ROWS	8
#define BACKOFF_SEEDSET_LFSRS	15

/* Known Good seed sets */
static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
    {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
    {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
    {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
    {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
    {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
    {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
    {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800,  84},
    {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184}};

static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
    {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
    {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
    {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
    {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
    {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
    {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
    {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
    {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}};
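/*
 * The two tables are indexed in lockstep: nv_gear_backoff_reseed() below
 * picks one random row (0..BACKOFF_SEEDSET_ROWS-1) and programs LFSR i
 * from main_seedset[row][i-1] and gear_seedset[row][i-1].
 */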
static void nv_gear_backoff_reseed(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed;
	u32 temp, seedset, combinedSeed;
	int i;

	/* Setup seed for free running LFSR */
	/* We are going to read the time stamp counter 3 times
	   and swizzle bits around to increase randomness */
	get_random_bytes(&miniseed1, sizeof(miniseed1));
	miniseed1 &= 0x0fff;
	if (miniseed1 == 0)
		miniseed1 = 0xabc;

	get_random_bytes(&miniseed2, sizeof(miniseed2));
	miniseed2 &= 0x0fff;
	if (miniseed2 == 0)
		miniseed2 = 0xabc;
	miniseed2_reversed =
		((miniseed2 & 0xF00) >> 8) |
		 (miniseed2 & 0x0F0) |
		 ((miniseed2 & 0x00F) << 8);

	get_random_bytes(&miniseed3, sizeof(miniseed3));
	miniseed3 &= 0x0fff;
	if (miniseed3 == 0)
		miniseed3 = 0xabc;
	miniseed3_reversed =
		((miniseed3 & 0xF00) >> 8) |
		 (miniseed3 & 0x0F0) |
		 ((miniseed3 & 0x00F) << 8);

	combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) |
		       (miniseed2 ^ miniseed3_reversed);

	/* Seeds can not be zero */
	if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0)
		combinedSeed |= 0x08;
	if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0)
		combinedSeed |= 0x8000;

	/* No need to disable tx here */
	temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
	temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
	temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
	writel(temp, base + NvRegBackOffControl);

	/* Setup seeds for all gear LFSRs. */
	get_random_bytes(&seedset, sizeof(seedset));
	seedset = seedset % BACKOFF_SEEDSET_ROWS;
	for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) {
		temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
		temp |= main_seedset[seedset][i-1] & 0x3ff;
		temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
		writel(temp, base + NvRegBackOffControl);
	}
}
/*
 * nv_start_xmit: dev->hard_start_xmit function
 * Called with netif_tx_lock held.
 */
static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb->len-skb->data_len;
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 empty_slots;
	struct ring_desc* put_tx;
	struct ring_desc* start_tx;
	struct ring_desc* prev_tx;
	struct nv_skb_map* prev_tx_ctx;
	unsigned long flags;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	empty_slots = nv_get_empty_tx_slots(np);
	if (unlikely(empty_slots <= entries)) {
		spin_lock_irqsave(&np->lock, flags);
		netif_stop_queue(dev);
		np->tx_stop = 1;
		spin_unlock_irqrestore(&np->lock, flags);
		return NETDEV_TX_BUSY;
	}

	start_tx = put_tx = np->put_tx.orig;

	/* setup the header buffer */
	do {
		prev_tx = put_tx;
		prev_tx_ctx = np->put_tx_ctx;
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						     PCI_DMA_TODEVICE);
		np->put_tx_ctx->dma_len = bcnt;
		put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

		tx_flags = np->tx_flags;
		offset += bcnt;
		size -= bcnt;
		if (unlikely(put_tx++ == np->last_tx.orig))
			put_tx = np->first_tx.orig;
		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
			np->put_tx_ctx = np->first_tx_ctx;
	} while (size);

	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			prev_tx = put_tx;
			prev_tx_ctx = np->put_tx_ctx;
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
							   PCI_DMA_TODEVICE);
			np->put_tx_ctx->dma_len = bcnt;
			put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

			offset += bcnt;
			size -= bcnt;
			if (unlikely(put_tx++ == np->last_tx.orig))
				put_tx = np->first_tx.orig;
			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
				np->put_tx_ctx = np->first_tx_ctx;
		} while (size);
	}

	/* set last fragment flag */
	prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);

	/* save skb in this slot's context area */
	prev_tx_ctx->skb = skb;

	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
	else
		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
			NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

	spin_lock_irqsave(&np->lock, flags);

	/* set tx flags */
	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
	np->put_tx.orig = put_tx;

	spin_unlock_irqrestore(&np->lock, flags);

	dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
		dev->name, entries, tx_flags_extra);
	{
		int j;
		for (j=0; j<64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
		}
		dprintk("\n");
	}

	dev->trans_start = jiffies;
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	return NETDEV_TX_OK;
}
static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra;
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb->len-skb->data_len;
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 empty_slots;
	struct ring_desc_ex* put_tx;
	struct ring_desc_ex* start_tx;
	struct ring_desc_ex* prev_tx;
	struct nv_skb_map* prev_tx_ctx;
	struct nv_skb_map* start_tx_ctx;
	unsigned long flags;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	empty_slots = nv_get_empty_tx_slots(np);
	if (unlikely(empty_slots <= entries)) {
		spin_lock_irqsave(&np->lock, flags);
		netif_stop_queue(dev);
		np->tx_stop = 1;
		spin_unlock_irqrestore(&np->lock, flags);
		return NETDEV_TX_BUSY;
	}

	start_tx = put_tx = np->put_tx.ex;
	start_tx_ctx = np->put_tx_ctx;

	/* setup the header buffer */
	do {
		prev_tx = put_tx;
		prev_tx_ctx = np->put_tx_ctx;
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						     PCI_DMA_TODEVICE);
		np->put_tx_ctx->dma_len = bcnt;
		put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
		put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

		tx_flags = NV_TX2_VALID;
		offset += bcnt;
		size -= bcnt;
		if (unlikely(put_tx++ == np->last_tx.ex))
			put_tx = np->first_tx.ex;
		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
			np->put_tx_ctx = np->first_tx_ctx;
	} while (size);

	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			prev_tx = put_tx;
			prev_tx_ctx = np->put_tx_ctx;
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
							   PCI_DMA_TODEVICE);
			np->put_tx_ctx->dma_len = bcnt;
			put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
			put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

			offset += bcnt;
			size -= bcnt;
			if (unlikely(put_tx++ == np->last_tx.ex))
				put_tx = np->first_tx.ex;
			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
				np->put_tx_ctx = np->first_tx_ctx;
		} while (size);
	}

	/* set last fragment flag */
	prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);

	/* save skb in this slot's context area */
	prev_tx_ctx->skb = skb;

	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
	else
		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
			NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

	/* vlan tag */
	if (likely(!np->vlangrp)) {
		start_tx->txvlan = 0;
	} else {
		if (vlan_tx_tag_present(skb))
			start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb));
		else
			start_tx->txvlan = 0;
	}

	spin_lock_irqsave(&np->lock, flags);

	if (np->tx_limit) {
		/* Limit the number of outstanding tx. Setup all fragments, but
		 * do not set the VALID bit on the first descriptor. Save a pointer
		 * to that descriptor and also for next skb_map element.
		 */

		if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) {
			if (!np->tx_change_owner)
				np->tx_change_owner = start_tx_ctx;

			/* remove VALID bit */
			tx_flags &= ~NV_TX2_VALID;
			start_tx_ctx->first_tx_desc = start_tx;
			start_tx_ctx->next_tx_ctx = np->put_tx_ctx;
			np->tx_end_flip = np->put_tx_ctx;
		} else {
			np->tx_pkts_in_progress++;
		}
	}

	/* set tx flags */
	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
	np->put_tx.ex = put_tx;

	spin_unlock_irqrestore(&np->lock, flags);

	dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
		dev->name, entries, tx_flags_extra);
	{
		int j;
		for (j=0; j<64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
		}
		dprintk("\n");
	}

	dev->trans_start = jiffies;
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	return NETDEV_TX_OK;
}
static inline void nv_tx_flip_ownership(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	np->tx_pkts_in_progress--;
	if (np->tx_change_owner) {
		np->tx_change_owner->first_tx_desc->flaglen |=
			cpu_to_le32(NV_TX2_VALID);
		np->tx_pkts_in_progress++;

		np->tx_change_owner = np->tx_change_owner->next_tx_ctx;
		if (np->tx_change_owner == np->tx_end_flip)
			np->tx_change_owner = NULL;

		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	}
}
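/*
 * Ownership flipping: once NV_TX_LIMIT_COUNT packets are in flight,
 * nv_start_xmit_optimized() queues further packets with the VALID bit
 * cleared and chains them via next_tx_ctx. As completions come in,
 * nv_tx_flip_ownership() sets VALID on the next deferred packet and kicks
 * the transmitter, so at most NV_TX_LIMIT_COUNT packets are outstanding.
 */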
/*
 * nv_tx_done: check for completed packets, release the skbs.
 *
 * Caller must own np->lock.
 */
static void nv_tx_done(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	struct ring_desc* orig_get_tx = np->get_tx.orig;

	while ((np->get_tx.orig != np->put_tx.orig) &&
	       !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID)) {

		dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
					dev->name, flags);

		pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
			       np->get_tx_ctx->dma_len,
			       PCI_DMA_TODEVICE);
		np->get_tx_ctx->dma = 0;

		if (np->desc_ver == DESC_VER_1) {
			if (flags & NV_TX_LASTPACKET) {
				if (flags & NV_TX_ERROR) {
					if (flags & NV_TX_UNDERFLOW)
						dev->stats.tx_fifo_errors++;
					if (flags & NV_TX_CARRIERLOST)
						dev->stats.tx_carrier_errors++;
					if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
						nv_legacybackoff_reseed(dev);
					dev->stats.tx_errors++;
				} else {
					dev->stats.tx_packets++;
					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
				}
				dev_kfree_skb_any(np->get_tx_ctx->skb);
				np->get_tx_ctx->skb = NULL;
			}
		} else {
			if (flags & NV_TX2_LASTPACKET) {
				if (flags & NV_TX2_ERROR) {
					if (flags & NV_TX2_UNDERFLOW)
						dev->stats.tx_fifo_errors++;
					if (flags & NV_TX2_CARRIERLOST)
						dev->stats.tx_carrier_errors++;
					if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
						nv_legacybackoff_reseed(dev);
					dev->stats.tx_errors++;
				} else {
					dev->stats.tx_packets++;
					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
				}
				dev_kfree_skb_any(np->get_tx_ctx->skb);
				np->get_tx_ctx->skb = NULL;
			}
		}
		if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
			np->get_tx.orig = np->first_tx.orig;
		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
			np->get_tx_ctx = np->first_tx_ctx;
	}
	if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
		np->tx_stop = 0;
		netif_wake_queue(dev);
	}
}
static void nv_tx_done_optimized(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	struct ring_desc_ex* orig_get_tx = np->get_tx.ex;

	while ((np->get_tx.ex != np->put_tx.ex) &&
	       !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) &&
	       (limit-- > 0)) {

		dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
					dev->name, flags);

		pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
			       np->get_tx_ctx->dma_len,
			       PCI_DMA_TODEVICE);
		np->get_tx_ctx->dma = 0;

		if (flags & NV_TX2_LASTPACKET) {
			if (!(flags & NV_TX2_ERROR))
				dev->stats.tx_packets++;
			else {
				if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
					if (np->driver_data & DEV_HAS_GEAR_MODE)
						nv_gear_backoff_reseed(dev);
					else
						nv_legacybackoff_reseed(dev);
				}
			}

			dev_kfree_skb_any(np->get_tx_ctx->skb);
			np->get_tx_ctx->skb = NULL;

			if (np->tx_limit) {
				nv_tx_flip_ownership(dev);
			}
		}
		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
			np->get_tx.ex = np->first_tx.ex;
		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
			np->get_tx_ctx = np->first_tx_ctx;
	}
	if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
		np->tx_stop = 0;
		netif_wake_queue(dev);
	}
}
/*
 * nv_tx_timeout: dev->tx_timeout function
 * Called with netif_tx_lock held.
 */
static void nv_tx_timeout(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 status;

	if (np->msi_flags & NV_MSI_X_ENABLED)
		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
	else
		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;

	printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);

	{
		int i;

		printk(KERN_INFO "%s: Ring at %lx\n",
		       dev->name, (unsigned long)np->ring_addr);
		printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
		for (i=0;i<=np->register_size;i+= 32) {
			printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
					i,
					readl(base + i + 0), readl(base + i + 4),
					readl(base + i + 8), readl(base + i + 12),
					readl(base + i + 16), readl(base + i + 20),
					readl(base + i + 24), readl(base + i + 28));
		}
		printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
		for (i=0;i<np->tx_ring_size;i+= 4) {
			if (!nv_optimized(np)) {
				printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.orig[i].buf),
				       le32_to_cpu(np->tx_ring.orig[i].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+1].buf),
				       le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+2].buf),
				       le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+3].buf),
				       le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
			} else {
				printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.ex[i].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i].buflow),
				       le32_to_cpu(np->tx_ring.ex[i].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+1].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+2].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+3].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
			}
		}
	}

	spin_lock_irq(&np->lock);

	/* 1) stop tx engine */
	nv_stop_tx(dev);

	/* 2) check that the packets were not sent already: */
	if (!nv_optimized(np))
		nv_tx_done(dev);
	else
		nv_tx_done_optimized(dev, np->tx_ring_size);

	/* 3) if there are dead entries: clear everything */
	if (np->get_tx_ctx != np->put_tx_ctx) {
		printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
		nv_drain_tx(dev);
		nv_init_tx(dev);
		setup_hw_rings(dev, NV_SETUP_TX_RING);
	}

	netif_wake_queue(dev);

	/* 4) restart tx engine */
	nv_start_tx(dev);
	spin_unlock_irq(&np->lock);
}
/*
 * Called when the nic notices a mismatch between the actual data len on the
 * wire and the len indicated in the 802 header
 */
static int nv_getlen(struct net_device *dev, void *packet, int datalen)
{
	int hdrlen;	/* length of the 802 header */
	int protolen;	/* length as stored in the proto field */

	/* 1) calculate len according to header */
	if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
		protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
		hdrlen = VLAN_HLEN;
	} else {
		protolen = ntohs( ((struct ethhdr *)packet)->h_proto);
		hdrlen = ETH_HLEN;
	}
	dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
				dev->name, datalen, protolen, hdrlen);
	if (protolen > ETH_DATA_LEN)
		return datalen; /* Value in proto field not a len, no checks possible */

	protolen += hdrlen;
	/* consistency checks: */
	if (datalen > ETH_ZLEN) {
		if (datalen >= protolen) {
			/* more data on wire than in 802 header, trim off
			 * additional data.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
					dev->name, protolen);
			return protolen;
		} else {
			/* less data on wire than mentioned in header.
			 * Discard the packet.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
					dev->name);
			return -1;
		}
	} else {
		/* short packet. Accept only if 802 values are also short */
		if (protolen > ETH_ZLEN) {
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
					dev->name);
			return -1;
		}
		dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
				dev->name, datalen);
		return datalen;
	}
}
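/*
 * Example for nv_getlen(): for an untagged frame, hdrlen is ETH_HLEN (14).
 * With datalen 100 and a length field of 46, protolen becomes 60 and the
 * frame is accepted trimmed to 60 bytes (wire padding is dropped). With
 * datalen 50 and the same header, 50 < 60, so the frame is discarded.
 */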
static int nv_rx_process(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	int rx_work = 0;
	struct sk_buff *skb;
	int len;

	while((np->get_rx.orig != np->put_rx.orig) &&
	      !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
		(rx_work < limit)) {

		dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
					dev->name, flags);

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
				np->get_rx_ctx->dma_len,
				PCI_DMA_FROMDEVICE);
		skb = np->get_rx_ctx->skb;
		np->get_rx_ctx->skb = NULL;

		{
			int j;
			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
			for (j=0; j<64; j++) {
				if ((j%16) == 0)
					dprintk("\n%03x:", j);
				dprintk(" %02x", ((unsigned char*)skb->data)[j]);
			}
			dprintk("\n");
		}
		/* look at what we actually got: */
		if (np->desc_ver == DESC_VER_1) {
			if (likely(flags & NV_RX_DESCRIPTORVALID)) {
				len = flags & LEN_MASK_V1;
				if (unlikely(flags & NV_RX_ERROR)) {
					if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
						len = nv_getlen(dev, skb->data, len);
						if (len < 0) {
							dev->stats.rx_errors++;
							dev_kfree_skb(skb);
							goto next_pkt;
						}
					}
					/* framing errors are soft errors */
					else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
						if (flags & NV_RX_SUBSTRACT1) {
							len--;
						}
					}
					/* the rest are hard errors */
					else {
						if (flags & NV_RX_MISSEDFRAME)
							dev->stats.rx_missed_errors++;
						if (flags & NV_RX_CRCERR)
							dev->stats.rx_crc_errors++;
						if (flags & NV_RX_OVERFLOW)
							dev->stats.rx_over_errors++;
						dev->stats.rx_errors++;
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
			} else {
				dev_kfree_skb(skb);
				goto next_pkt;
			}
		} else {
			if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
				len = flags & LEN_MASK_V2;
				if (unlikely(flags & NV_RX2_ERROR)) {
					if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
						len = nv_getlen(dev, skb->data, len);
						if (len < 0) {
							dev->stats.rx_errors++;
							dev_kfree_skb(skb);
							goto next_pkt;
						}
					}
					/* framing errors are soft errors */
					else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
						if (flags & NV_RX2_SUBSTRACT1) {
							len--;
						}
					}
					/* the rest are hard errors */
					else {
						if (flags & NV_RX2_CRCERR)
							dev->stats.rx_crc_errors++;
						if (flags & NV_RX2_OVERFLOW)
							dev->stats.rx_over_errors++;
						dev->stats.rx_errors++;
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
				if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
				    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /*ip and udp */
					skb->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				dev_kfree_skb(skb);
				goto next_pkt;
			}
		}
		/* got a valid packet - forward it to the network core */
		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
					dev->name, len, skb->protocol);
#ifdef CONFIG_FORCEDETH_NAPI
		netif_receive_skb(skb);
#else
		netif_rx(skb);
#endif
		dev->last_rx = jiffies;
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
next_pkt:
		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
			np->get_rx.orig = np->first_rx.orig;
		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
			np->get_rx_ctx = np->first_rx_ctx;

		rx_work++;
	}

	return rx_work;
}
static int nv_rx_process_optimized(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	u32 vlanflags = 0;
	int rx_work = 0;
	struct sk_buff *skb;
	int len;

	while((np->get_rx.ex != np->put_rx.ex) &&
	      !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
	      (rx_work < limit)) {

		dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
					dev->name, flags);

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
				np->get_rx_ctx->dma_len,
				PCI_DMA_FROMDEVICE);
		skb = np->get_rx_ctx->skb;
		np->get_rx_ctx->skb = NULL;

		{
			int j;
			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
			for (j=0; j<64; j++) {
				if ((j%16) == 0)
					dprintk("\n%03x:", j);
				dprintk(" %02x", ((unsigned char*)skb->data)[j]);
			}
			dprintk("\n");
		}
		/* look at what we actually got: */
		if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
			len = flags & LEN_MASK_V2;
			if (unlikely(flags & NV_RX2_ERROR)) {
				if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
					len = nv_getlen(dev, skb->data, len);
					if (len < 0) {
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
				/* framing errors are soft errors */
				else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
					if (flags & NV_RX2_SUBSTRACT1) {
						len--;
					}
				}
				/* the rest are hard errors */
				else {
					dev_kfree_skb(skb);
					goto next_pkt;
				}
			}

			if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
			    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /*ip and udp */
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			/* got a valid packet - forward it to the network core */
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, dev);
			prefetch(skb->data);

			dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n",
				dev->name, len, skb->protocol);

			if (likely(!np->vlangrp)) {
#ifdef CONFIG_FORCEDETH_NAPI
				netif_receive_skb(skb);
#else
				netif_rx(skb);
#endif
			} else {
				vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
				if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
#ifdef CONFIG_FORCEDETH_NAPI
					vlan_hwaccel_receive_skb(skb, np->vlangrp,
								 vlanflags & NV_RX3_VLAN_TAG_MASK);
#else
					vlan_hwaccel_rx(skb, np->vlangrp,
							vlanflags & NV_RX3_VLAN_TAG_MASK);
#endif
				} else {
#ifdef CONFIG_FORCEDETH_NAPI
					netif_receive_skb(skb);
#else
					netif_rx(skb);
#endif
				}
			}

			dev->last_rx = jiffies;
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += len;
		} else {
			dev_kfree_skb(skb);
		}
next_pkt:
		if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
			np->get_rx.ex = np->first_rx.ex;
		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
			np->get_rx_ctx = np->first_rx_ctx;

		rx_work++;
	}

	return rx_work;
}
static void set_bufsize(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	if (dev->mtu <= ETH_DATA_LEN)
		np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
	else
		np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
}
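/*
 * NV_RX_HEADERS is extra headroom added on top of the MTU, so rx buffers
 * are always sized mtu + NV_RX_HEADERS rather than the bare MTU; this
 * leaves room for the link-level header bytes in front of the payload.
 */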
/*
 * nv_change_mtu: dev->change_mtu function
 * Called with dev_base_lock held for read.
 */
static int nv_change_mtu(struct net_device *dev, int new_mtu)
{
	struct fe_priv *np = netdev_priv(dev);
	int old_mtu;

	if (new_mtu < 64 || new_mtu > np->pkt_limit)
		return -EINVAL;

	old_mtu = dev->mtu;
	dev->mtu = new_mtu;

	/* return early if the buffer sizes will not change */
	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
		return 0;
	if (old_mtu == new_mtu)
		return 0;

	/* synchronized against open : rtnl_lock() held by caller */
	if (netif_running(dev)) {
		u8 __iomem *base = get_hwbase(dev);
		/*
		 * It seems that the nic preloads valid ring entries into an
		 * internal buffer. The procedure for flushing everything is
		 * guessed, there is probably a simpler approach.
		 * Changing the MTU is a rare event, it shouldn't matter.
		 */
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rxtx(dev);
		nv_txrx_reset(dev);
		/* drain rx queue */
		nv_drain_rxtx(dev);
		/* reinit driver view of the rx queue */
		set_bufsize(dev);
		if (nv_init_ring(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}
		/* reinit nic view of the rx queue */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
			base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);

		/* restart rx engine */
		nv_start_rxtx(dev);
		spin_unlock(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
		nv_enable_irq(dev);
	}
	return 0;
}
static void nv_copy_mac_to_hw(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 mac[2];

	mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
			(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
	mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);

	writel(mac[0], base + NvRegMacAddrA);
	writel(mac[1], base + NvRegMacAddrB);
}
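/*
 * The MAC address is packed little-endian into two 32-bit registers:
 * NvRegMacAddrA holds bytes 0-3 (byte 0 in the least significant position)
 * and NvRegMacAddrB holds bytes 4-5 in its low 16 bits.
 */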
/*
 * nv_set_mac_address: dev->set_mac_address function
 * Called with rtnl_lock() held.
 */
static int nv_set_mac_address(struct net_device *dev, void *addr)
{
	struct fe_priv *np = netdev_priv(dev);
	struct sockaddr *macaddr = (struct sockaddr*)addr;

	if (!is_valid_ether_addr(macaddr->sa_data))
		return -EADDRNOTAVAIL;

	/* synchronized against open : rtnl_lock() held by caller */
	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);

	if (netif_running(dev)) {
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock_irq(&np->lock);

		/* stop rx engine */
		nv_stop_rx(dev);

		/* set mac address */
		nv_copy_mac_to_hw(dev);

		/* restart rx engine */
		nv_start_rx(dev);
		spin_unlock_irq(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
	} else {
		nv_copy_mac_to_hw(dev);
	}
	return 0;
}
/*
 * nv_set_multicast: dev->set_multicast function
 * Called with netif_tx_lock held.
 */
static void nv_set_multicast(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 addr[2];
	u32 mask[2];
	u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;

	memset(addr, 0, sizeof(addr));
	memset(mask, 0, sizeof(mask));

	if (dev->flags & IFF_PROMISC) {
		pff |= NVREG_PFF_PROMISC;
	} else {
		pff |= NVREG_PFF_MYADDR;

		if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
			u32 alwaysOff[2];
			u32 alwaysOn[2];

			alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
			if (dev->flags & IFF_ALLMULTI) {
				alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
			} else {
				struct dev_mc_list *walk;

				walk = dev->mc_list;
				while (walk != NULL) {
					u32 a, b;
					a = le32_to_cpu(*(__le32 *) walk->dmi_addr);
					b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4]));
					alwaysOn[0] &= a;
					alwaysOff[0] &= ~a;
					alwaysOn[1] &= b;
					alwaysOff[1] &= ~b;
					walk = walk->next;
				}
			}
			addr[0] = alwaysOn[0];
			addr[1] = alwaysOn[1];
			mask[0] = alwaysOn[0] | alwaysOff[0];
			mask[1] = alwaysOn[1] | alwaysOff[1];
		} else {
			mask[0] = NVREG_MCASTMASKA_NONE;
			mask[1] = NVREG_MCASTMASKB_NONE;
		}
	}
	addr[0] |= NVREG_MCASTADDRA_FORCE;
	pff |= NVREG_PFF_ALWAYS;
	spin_lock_irq(&np->lock);
	nv_stop_rx(dev);
	writel(addr[0], base + NvRegMulticastAddrA);
	writel(addr[1], base + NvRegMulticastAddrB);
	writel(mask[0], base + NvRegMulticastMaskA);
	writel(mask[1], base + NvRegMulticastMaskB);
	writel(pff, base + NvRegPacketFilterFlags);
	dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
		dev->name);
	nv_start_rx(dev);
	spin_unlock_irq(&np->lock);
}
static void nv_update_pause(struct net_device *dev, u32 pause_flags)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);

	if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
		u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
		if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
			writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		} else {
			writel(pff, base + NvRegPacketFilterFlags);
		}
	}
	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
		u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
		if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
			u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)
				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
			writel(pause_enable, base + NvRegTxPauseFrame);
			writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
		} else {
			writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
			writel(regmisc, base + NvRegMisc1);
		}
	}
}
/*
 * nv_update_linkspeed: Setup the MAC according to the link partner
 * @dev: Network device to be configured
 *
 * The function queries the PHY and checks if there is a link partner.
 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
 * set to 10 MBit HD.
 *
 * The function returns 0 if there is no link partner and 1 if there is
 * a good link partner.
 */
static int nv_update_linkspeed(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int adv = 0;
	int lpa = 0;
	int adv_lpa, adv_pause, lpa_pause;
	int newls = np->linkspeed;
	int newdup = np->duplex;
	int mii_status;
	int retval = 0;
	u32 control_1000, status_1000, phyreg, pause_flags, txreg;
	u32 txrxFlags = 0;
	u32 phy_exp;

	/* BMSR_LSTATUS is latched, read it twice:
	 * we want the current value.
	 */
	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

	if (!(mii_status & BMSR_LSTATUS)) {
		dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
				dev->name);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		goto set_speed;
	}

	if (np->autoneg == 0) {
		dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
				dev->name, np->fixed_mode);
		if (np->fixed_mode & LPA_100FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 1;
		} else if (np->fixed_mode & LPA_100HALF) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 0;
		} else if (np->fixed_mode & LPA_10FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 1;
		} else {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 0;
		}
		retval = 1;
		goto set_speed;
	}
	/* check auto negotiation is complete */
	if (!(mii_status & BMSR_ANEGCOMPLETE)) {
		/* still in autonegotiation - configure nic for 10 MBit HD and wait. */
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
		goto set_speed;
	}

	adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
	dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
				dev->name, adv, lpa);

	retval = 1;
	if (np->gigabit == PHY_GIGABIT) {
		control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);

		if ((control_1000 & ADVERTISE_1000FULL) &&
			(status_1000 & LPA_1000FULL)) {
			dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
				dev->name);
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
			newdup = 1;
			goto set_speed;
		}
	}

	/* FIXME: handle parallel detection properly */
	adv_lpa = lpa & adv;
	if (adv_lpa & LPA_100FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 1;
	} else if (adv_lpa & LPA_100HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 0;
	} else if (adv_lpa & LPA_10FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 1;
	} else if (adv_lpa & LPA_10HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	} else {
		dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	}

set_speed:
	if (np->duplex == newdup && np->linkspeed == newls)
		return retval;

	dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
			dev->name, np->linkspeed, np->duplex, newls, newdup);

	np->duplex = newdup;
	np->linkspeed = newls;

	/* The transmitter and receiver must be restarted for safe update */
	if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
		txrxFlags |= NV_RESTART_TX;
		nv_stop_tx(dev);
	}
	if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
		txrxFlags |= NV_RESTART_RX;
		nv_stop_rx(dev);
	}

	if (np->gigabit == PHY_GIGABIT) {
		phyreg = readl(base + NvRegSlotTime);
		phyreg &= ~(0x3FF00);
		if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) ||
		    ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100))
			phyreg |= NVREG_SLOTTIME_10_100_FULL;
		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
			phyreg |= NVREG_SLOTTIME_1000_FULL;
		writel(phyreg, base + NvRegSlotTime);
	}

	phyreg = readl(base + NvRegPhyInterface);
	phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
	if (np->duplex == 0)
		phyreg |= PHY_HALF;
	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
		phyreg |= PHY_100;
	else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
		phyreg |= PHY_1000;
	writel(phyreg, base + NvRegPhyInterface);

	phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
	if (phyreg & PHY_RGMII) {
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) {
			txreg = NVREG_TX_DEFERRAL_RGMII_1000;
		} else {
			if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) {
				if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10)
					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10;
				else
					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100;
			} else {
				txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
			}
		}
	} else {
		if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX))
			txreg = NVREG_TX_DEFERRAL_MII_STRETCH;
		else
			txreg = NVREG_TX_DEFERRAL_DEFAULT;
	}
	writel(txreg, base + NvRegTxDeferral);

	if (np->desc_ver == DESC_VER_1) {
		txreg = NVREG_TX_WM_DESC1_DEFAULT;
	} else {
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
			txreg = NVREG_TX_WM_DESC2_3_1000;
		else
			txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
	}
	writel(txreg, base + NvRegTxWatermark);

	writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
		base + NvRegMisc1);
	pci_push(base);
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);

	pause_flags = 0;
	/* setup pause frame */
	if (np->duplex != 0) {
		if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
			adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
			lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

			switch (adv_pause) {
			case ADVERTISE_PAUSE_CAP:
				if (lpa_pause & LPA_PAUSE_CAP) {
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				break;
			case ADVERTISE_PAUSE_ASYM:
				if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
					pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				break;
			case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
				if (lpa_pause & LPA_PAUSE_CAP) {
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				if (lpa_pause == LPA_PAUSE_ASYM)
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
				break;
			}
		} else {
			pause_flags = np->pause_flags;
		}
	}
	nv_update_pause(dev, pause_flags);

	if (txrxFlags & NV_RESTART_TX)
		nv_start_tx(dev);
	if (txrxFlags & NV_RESTART_RX)
		nv_start_rx(dev);

	return retval;
}
static void nv_linkchange(struct net_device *dev)
{
	if (nv_update_linkspeed(dev)) {
		if (!netif_carrier_ok(dev)) {
			netif_carrier_on(dev);
			printk(KERN_INFO "%s: link up.\n", dev->name);
			nv_start_rx(dev);
		}
	} else {
		if (netif_carrier_ok(dev)) {
			netif_carrier_off(dev);
			printk(KERN_INFO "%s: link down.\n", dev->name);
			nv_stop_rx(dev);
		}
	}
}
static void nv_link_irq(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 miistat;

	miistat = readl(base + NvRegMIIStatus);
	writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
	dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);

	if (miistat & (NVREG_MIISTAT_LINKCHANGE))
		nv_linkchange(dev);
	dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
}
static void nv_msi_workaround(struct fe_priv *np)
{

	/* Need to toggle the msi irq mask within the ethernet device,
	 * otherwise, future interrupts will not be detected.
	 */
	if (np->msi_flags & NV_MSI_ENABLED) {
		u8 __iomem *base = np->base;

		writel(0, base + NvRegMSIIrqMask);
		writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
	}
}
static irqreturn_t nv_nic_irq(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;

	dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);

	for (i=0; ; i++) {
		if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
			events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
		} else {
			events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
		}
		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		nv_msi_workaround(np);

		spin_lock(&np->lock);
		nv_tx_done(dev);
		spin_unlock(&np->lock);

#ifdef CONFIG_FORCEDETH_NAPI
		if (events & NVREG_IRQ_RX_ALL) {
			netif_rx_schedule(dev, &np->napi);

			/* Disable further receive irq's */
			spin_lock(&np->lock);
			np->irqmask &= ~NVREG_IRQ_RX_ALL;

			if (np->msi_flags & NV_MSI_X_ENABLED)
				writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			spin_unlock(&np->lock);
		}
#else
		if (nv_rx_process(dev, RX_WORK_PER_LOOP)) {
			if (unlikely(nv_alloc_rx(dev))) {
				spin_lock(&np->lock);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock(&np->lock);
			}
		}
#endif
		if (unlikely(events & NVREG_IRQ_LINK)) {
			spin_lock(&np->lock);
			nv_link_irq(dev);
			spin_unlock(&np->lock);
		}
		if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
			spin_lock(&np->lock);
			nv_linkchange(dev);
			spin_unlock(&np->lock);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
						dev->name, events);
		}
		if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
						dev->name, events);
		}
		if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
			spin_lock(&np->lock);
			/* disable interrupts on the nic */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(0, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq = np->irqmask;
				np->recover_error = 1;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock(&np->lock);
			break;
		}
		if (unlikely(i > max_interrupt_work)) {
			spin_lock(&np->lock);
			/* disable interrupts on the nic */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(0, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq = np->irqmask;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock(&np->lock);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
			break;
		}

	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);

	return IRQ_RETVAL(i);
}
/*
 * All _optimized functions are used to help increase performance
 * (reduce CPU and increase throughput). They use descriptor version 3,
 * compiler directives, and reduce memory accesses.
 */
static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);

	for (i=0; ; i++) {
		if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
			events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
		} else {
			events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
		}
		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		nv_msi_workaround(np);

		spin_lock(&np->lock);
		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock(&np->lock);

#ifdef CONFIG_FORCEDETH_NAPI
		if (events & NVREG_IRQ_RX_ALL) {
			netif_rx_schedule(dev, &np->napi);

			/* Disable further receive irq's */
			spin_lock(&np->lock);
			np->irqmask &= ~NVREG_IRQ_RX_ALL;

			if (np->msi_flags & NV_MSI_X_ENABLED)
				writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			spin_unlock(&np->lock);
		}
#else
		if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
			if (unlikely(nv_alloc_rx_optimized(dev))) {
				spin_lock(&np->lock);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock(&np->lock);
			}
		}
#endif
		if (unlikely(events & NVREG_IRQ_LINK)) {
			spin_lock(&np->lock);
			nv_link_irq(dev);
			spin_unlock(&np->lock);
		}
		if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
			spin_lock(&np->lock);
			nv_linkchange(dev);
			spin_unlock(&np->lock);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
						dev->name, events);
		}
		if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
						dev->name, events);
		}
		if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
			spin_lock(&np->lock);
			/* disable interrupts on the nic */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(0, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq = np->irqmask;
				np->recover_error = 1;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock(&np->lock);
			break;
		}

		if (unlikely(i > max_interrupt_work)) {
			spin_lock(&np->lock);
			/* disable interrupts on the nic */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(0, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq = np->irqmask;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock(&np->lock);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
			break;
		}

	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);

	return IRQ_RETVAL(i);
}
static irqreturn_t nv_nic_irq_tx(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);

	for (i=0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
		writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
		dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		spin_lock_irqsave(&np->lock, flags);
		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock_irqrestore(&np->lock, flags);

		if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
						dev->name, events);
		}
		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
			break;
		}

	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);

	return IRQ_RETVAL(i);
}
#ifdef CONFIG_FORCEDETH_NAPI
static int nv_napi_poll(struct napi_struct *napi, int budget)
{
	struct fe_priv *np = container_of(napi, struct fe_priv, napi);
	struct net_device *dev = np->dev;
	u8 __iomem *base = get_hwbase(dev);
	unsigned long flags;
	int pkts, retcode;

	if (!nv_optimized(np)) {
		pkts = nv_rx_process(dev, budget);
		retcode = nv_alloc_rx(dev);
	} else {
		pkts = nv_rx_process_optimized(dev, budget);
		retcode = nv_alloc_rx_optimized(dev);
	}

	if (retcode) {
		spin_lock_irqsave(&np->lock, flags);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock_irqrestore(&np->lock, flags);
	}

	if (pkts < budget) {
		/* re-enable receive interrupts */
		spin_lock_irqsave(&np->lock, flags);

		__netif_rx_complete(dev, napi);

		np->irqmask |= NVREG_IRQ_RX_ALL;
		if (np->msi_flags & NV_MSI_X_ENABLED)
			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
		else
			writel(np->irqmask, base + NvRegIrqMask);

		spin_unlock_irqrestore(&np->lock, flags);
	}
	return pkts;
}
#endif
#ifdef CONFIG_FORCEDETH_NAPI
static irqreturn_t nv_nic_irq_rx(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;

	events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
	writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);

	if (events) {
		netif_rx_schedule(dev, &np->napi);
		/* disable receive interrupts on the nic */
		writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
		pci_push(base);
	}
	return IRQ_HANDLED;
}
#else
static irqreturn_t nv_nic_irq_rx(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);

	for (i=0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
		writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
		dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
			if (unlikely(nv_alloc_rx_optimized(dev))) {
				spin_lock_irqsave(&np->lock, flags);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock_irqrestore(&np->lock, flags);
			}
		}

		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);

	return IRQ_RETVAL(i);
}
#endif
static irqreturn_t nv_nic_irq_other(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);

	for (i=0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
		writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		/* check tx in case we reached max loop limit in tx isr */
		spin_lock_irqsave(&np->lock, flags);
		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock_irqrestore(&np->lock, flags);

		if (events & NVREG_IRQ_LINK) {
			spin_lock_irqsave(&np->lock, flags);
			nv_link_irq(dev);
			spin_unlock_irqrestore(&np->lock, flags);
		}
		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
			spin_lock_irqsave(&np->lock, flags);
			nv_linkchange(dev);
			spin_unlock_irqrestore(&np->lock, flags);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (events & NVREG_IRQ_RECOVER_ERROR) {
			spin_lock_irq(&np->lock);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_OTHER;
				np->recover_error = 1;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irq(&np->lock);
			break;
		}
		if (events & (NVREG_IRQ_UNKNOWN)) {
			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
						dev->name, events);
		}
		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_OTHER;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
			break;
		}

	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);

	return IRQ_RETVAL(i);
}
static irqreturn_t nv_nic_irq_test(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);

	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
		writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
	} else {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
		writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
	}
	pci_push(base);
	dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
	if (!(events & NVREG_IRQ_TIMER))
		return IRQ_RETVAL(0);

	nv_msi_workaround(np);

	spin_lock(&np->lock);
	np->intr_test = 1;
	spin_unlock(&np->lock);

	dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);

	return IRQ_RETVAL(1);
}
static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
{
	u8 __iomem *base = get_hwbase(dev);
	int i;
	u32 msixmap = 0;

	/* Each interrupt bit can be mapped to a MSIX vector (4 bits).
	 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
	 * the remaining 8 interrupts.
	 */
	for (i = 0; i < 8; i++) {
		if ((irqmask >> i) & 0x1) {
			msixmap |= vector << (i << 2);
		}
	}
	writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);

	msixmap = 0;
	for (i = 0; i < 8; i++) {
		if ((irqmask >> (i + 8)) & 0x1) {
			msixmap |= vector << (i << 2);
		}
	}
	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
}

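/*
 * Worked example of the nibble packing above: mapping vector 2 onto
 * interrupt bit 3 computes msixmap |= 2 << (3 << 2), i.e. 2 << 12, so the
 * fourth nibble (bits 12-15) of NvRegMSIXMap0 becomes 0x2 while the
 * read-modify-write OR leaves the other seven nibbles untouched.
 * Interrupt bits 8-15 land in NvRegMSIXMap1 at nibble (bit - 8).
 */
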
static int nv_request_irq(struct net_device *dev, int intr_test)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int i;
	irqreturn_t (*handler)(int foo, void *data);

	if (intr_test) {
		handler = nv_nic_irq_test;
	} else {
		if (nv_optimized(np))
			handler = nv_nic_irq_optimized;
		else
			handler = nv_nic_irq;
	}

	if (np->msi_flags & NV_MSI_X_CAPABLE) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
			np->msi_x_entry[i].entry = i;
		}
		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
			np->msi_flags |= NV_MSI_X_ENABLED;
			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
				/* Request irq for rx handling */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_err;
				}
				/* Request irq for tx handling */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_free_rx;
				}
				/* Request irq for link and timer handling */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_free_tx;
				}
				/* map interrupts to their respective vector */
				writel(0, base + NvRegMSIXMap0);
				writel(0, base + NvRegMSIXMap1);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
			} else {
				/* Request irq for all interrupts */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_err;
				}

				/* map interrupts to vector 0 */
				writel(0, base + NvRegMSIXMap0);
				writel(0, base + NvRegMSIXMap1);
			}
		}
	}
	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
			np->msi_flags |= NV_MSI_ENABLED;
			dev->irq = np->pci_dev->irq;
			if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
				pci_disable_msi(np->pci_dev);
				np->msi_flags &= ~NV_MSI_ENABLED;
				dev->irq = np->pci_dev->irq;
				goto out_err;
			}

			/* map interrupts to vector 0 */
			writel(0, base + NvRegMSIMap0);
			writel(0, base + NvRegMSIMap1);
			/* enable msi vector 0 */
			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
		}
	}
	if (ret != 0) {
		if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
			goto out_err;
	}

	return 0;
out_free_tx:
	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
out_free_rx:
	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
out_err:
	return 1;
}

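/*
 * Interrupt setup above falls back in three stages: per-event MSI-X
 * vectors (rx/tx/other) when the device is MSI-X capable and running in
 * throughput mode, a single shared MSI-X or MSI vector otherwise, and
 * finally the legacy pci irq line if neither message-signalled mode could
 * be enabled. Each stage cleans up after itself before the next one is
 * tried, so a failed pci_enable_msix() leaves ret != 0 and the MSI path
 * takes over.
 */
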
static void nv_free_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	int i;

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
			free_irq(np->msi_x_entry[i].vector, dev);
		}
		pci_disable_msix(np->pci_dev);
		np->msi_flags &= ~NV_MSI_X_ENABLED;
	} else {
		free_irq(np->pci_dev->irq, dev);
		if (np->msi_flags & NV_MSI_ENABLED) {
			pci_disable_msi(np->pci_dev);
			np->msi_flags &= ~NV_MSI_ENABLED;
		}
	}
}

static void nv_do_nic_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 mask = 0;

	/*
	 * First disable irq(s) and then
	 * reenable interrupts on the nic, we have to do this before calling
	 * nv_nic_irq because that may decide to do otherwise
	 */

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq_lockdep(np->pci_dev->irq);
		mask = np->irqmask;
	} else {
		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
			mask |= NVREG_IRQ_RX_ALL;
		}
		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
			mask |= NVREG_IRQ_TX_ALL;
		}
		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
			mask |= NVREG_IRQ_OTHER;
		}
	}
	np->nic_poll_irq = 0;

	/* disable_irq() contains synchronize_irq, thus no irq handler can run now */

	if (np->recover_error) {
		np->recover_error = 0;
		printk(KERN_INFO "forcedeth: MAC in recoverable error state\n");
		if (netif_running(dev)) {
			netif_tx_lock_bh(dev);
			netif_addr_lock(dev);
			spin_lock(&np->lock);
			/* stop engines */
			nv_stop_rxtx(dev);
			nv_txrx_reset(dev);
			/* drain rx queue */
			nv_drain_rxtx(dev);
			/* reinit driver view of the rx queue */
			set_bufsize(dev);
			if (nv_init_ring(dev)) {
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			}
			/* reinit nic view of the rx queue */
			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
			writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
				base + NvRegRingSizes);
			pci_push(base);
			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
			pci_push(base);

			/* restart rx engine */
			nv_start_rxtx(dev);
			spin_unlock(&np->lock);
			netif_addr_unlock(dev);
			netif_tx_unlock_bh(dev);
		}
	}

	writel(mask, base + NvRegIrqMask);
	pci_push(base);

	if (!using_multi_irqs(dev)) {
		if (nv_optimized(np))
			nv_nic_irq_optimized(0, dev);
		else
			nv_nic_irq(0, dev);
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq_lockdep(np->pci_dev->irq);
	} else {
		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
			nv_nic_irq_rx(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		}
		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
			nv_nic_irq_tx(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		}
		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
			nv_nic_irq_other(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
		}
	}
}

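/*
 * nv_do_nic_poll() runs from the nic_poll timer as a software substitute
 * for a lost or deliberately masked interrupt: the irq handlers arm it
 * (via nic_poll_irq and mod_timer) whenever they mask events on the nic,
 * and this function re-runs the matching handler with the irq line safely
 * disabled before unmasking again.
 */
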
#ifdef CONFIG_NET_POLL_CONTROLLER
static void nv_poll_controller(struct net_device *dev)
{
	nv_do_nic_poll((unsigned long) dev);
}
#endif

static void nv_do_stats_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	nv_get_hw_stats(dev);

	if (!np->in_shutdown)
		mod_timer(&np->stats_poll,
			round_jiffies(jiffies + STATS_INTERVAL));
}

static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct fe_priv *np = netdev_priv(dev);
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, FORCEDETH_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}

static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);
	wolinfo->supported = WAKE_MAGIC;

	spin_lock_irq(&np->lock);
	if (np->wolenabled)
		wolinfo->wolopts = WAKE_MAGIC;
	spin_unlock_irq(&np->lock);
}

static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 flags = 0;

	if (wolinfo->wolopts == 0) {
		np->wolenabled = 0;
	} else if (wolinfo->wolopts & WAKE_MAGIC) {
		np->wolenabled = 1;
		flags = NVREG_WAKEUPFLAGS_ENABLE;
	}
	if (netif_running(dev)) {
		spin_lock_irq(&np->lock);
		writel(flags, base + NvRegWakeUpFlags);
		spin_unlock_irq(&np->lock);
	}
	return 0;
}

static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);
	int adv;

	spin_lock_irq(&np->lock);
	ecmd->port = PORT_MII;
	if (!netif_running(dev)) {
		/* We do not track link speed / duplex setting if the
		 * interface is disabled. Force a link check */
		if (nv_update_linkspeed(dev)) {
			if (!netif_carrier_ok(dev))
				netif_carrier_on(dev);
		} else {
			if (netif_carrier_ok(dev))
				netif_carrier_off(dev);
		}
	}

	if (netif_carrier_ok(dev)) {
		switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
		case NVREG_LINKSPEED_10:
			ecmd->speed = SPEED_10;
			break;
		case NVREG_LINKSPEED_100:
			ecmd->speed = SPEED_100;
			break;
		case NVREG_LINKSPEED_1000:
			ecmd->speed = SPEED_1000;
			break;
		}
		ecmd->duplex = DUPLEX_HALF;
		if (np->duplex)
			ecmd->duplex = DUPLEX_FULL;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	ecmd->autoneg = np->autoneg;

	ecmd->advertising = ADVERTISED_MII;
	if (np->autoneg) {
		ecmd->advertising |= ADVERTISED_Autoneg;
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		if (adv & ADVERTISE_10HALF)
			ecmd->advertising |= ADVERTISED_10baseT_Half;
		if (adv & ADVERTISE_10FULL)
			ecmd->advertising |= ADVERTISED_10baseT_Full;
		if (adv & ADVERTISE_100HALF)
			ecmd->advertising |= ADVERTISED_100baseT_Half;
		if (adv & ADVERTISE_100FULL)
			ecmd->advertising |= ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			if (adv & ADVERTISE_1000FULL)
				ecmd->advertising |= ADVERTISED_1000baseT_Full;
		}
	}
	ecmd->supported = (SUPPORTED_Autoneg |
		SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		SUPPORTED_MII);
	if (np->gigabit == PHY_GIGABIT)
		ecmd->supported |= SUPPORTED_1000baseT_Full;

	ecmd->phy_address = np->phyaddr;
	ecmd->transceiver = XCVR_EXTERNAL;

	/* ignore maxtxpkt, maxrxpkt for now */
	spin_unlock_irq(&np->lock);
	return 0;
}

static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);

	if (ecmd->port != PORT_MII)
		return -EINVAL;
	if (ecmd->transceiver != XCVR_EXTERNAL)
		return -EINVAL;
	if (ecmd->phy_address != np->phyaddr) {
		/* TODO: support switching between multiple phys. Should be
		 * trivial, but not enabled due to lack of test hardware. */
		return -EINVAL;
	}
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		u32 mask;

		mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT)
			mask |= ADVERTISED_1000baseT_Full;

		if ((ecmd->advertising & mask) == 0)
			return -EINVAL;

	} else if (ecmd->autoneg == AUTONEG_DISABLE) {
		/* Note: autonegotiation disable, speed 1000 intentionally
		 * forbidden - noone should need that. */

		if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
			return -EINVAL;
		if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	netif_carrier_off(dev);
	if (netif_running(dev)) {
		unsigned long flags;

		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		/* with plain spinlock lockdep complains */
		spin_lock_irqsave(&np->lock, flags);
		/* stop engines */
		/* FIXME:
		 * this can take some time, and interrupts are disabled
		 * due to spin_lock_irqsave, but let's hope no daemon
		 * is going to change the settings very often...
		 * Worst case:
		 * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX
		 * + some minor delays, which is up to a second approximately
		 */
		nv_stop_rxtx(dev);
		spin_unlock_irqrestore(&np->lock, flags);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
	}

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		int adv, bmcr;

		np->autoneg = 1;

		/* advertise only what has been requested */
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (ecmd->advertising & ADVERTISED_10baseT_Half)
			adv |= ADVERTISE_10HALF;
		if (ecmd->advertising & ADVERTISED_10baseT_Full)
			adv |= ADVERTISE_10FULL;
		if (ecmd->advertising & ADVERTISED_100baseT_Half)
			adv |= ADVERTISE_100HALF;
		if (ecmd->advertising & ADVERTISED_100baseT_Full)
			adv |= ADVERTISE_100FULL;
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ)  /* for rx we set both advertisments but disable tx pause */
			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
			adv |=  ADVERTISE_PAUSE_ASYM;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			if (ecmd->advertising & ADVERTISED_1000baseT_Full)
				adv |= ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
		}

		if (netif_running(dev))
			printk(KERN_INFO "%s: link down.\n", dev->name);
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
			bmcr |= BMCR_ANENABLE;
			/* reset the phy in order for settings to stick,
			 * and cause autoneg to start */
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
		}
	} else {
		int adv, bmcr;

		np->autoneg = 0;

		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_10HALF;
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_10FULL;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_100HALF;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_100FULL;
		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisments but disable tx pause */
			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		}
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
			adv |=  ADVERTISE_PAUSE_ASYM;
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
		}
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
		np->fixed_mode = adv;

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
		if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
			bmcr |= BMCR_FULLDPLX;
		if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
			bmcr |= BMCR_SPEED100;
		if (np->phy_oui == PHY_OUI_MARVELL) {
			/* reset the phy in order for forced mode settings to stick */
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
			if (netif_running(dev)) {
				/* Wait a bit and then reconfigure the nic. */
				udelay(10);
				nv_linkchange(dev);
			}
		}
	}

	if (netif_running(dev)) {
		nv_start_rxtx(dev);
		nv_enable_irq(dev);
	}

	return 0;
}

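/*
 * Pause advertisement encoding used above: a receive-pause request
 * advertises both the symmetric (ADVERTISE_PAUSE_CAP) and asymmetric
 * (ADVERTISE_PAUSE_ASYM) bits and later disables tx pause locally, while
 * a tx-only request advertises just the asymmetric bit, matching the
 * IEEE 802.3 pause resolution rules.
 */
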
#define FORCEDETH_REGS_VER	1

static int nv_get_regs_len(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	return np->register_size;
}

static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 *rbuf = buf;
	int i;

	regs->version = FORCEDETH_REGS_VER;
	spin_lock_irq(&np->lock);
	for (i = 0; i <= np->register_size/sizeof(u32); i++)
		rbuf[i] = readl(base + i*sizeof(u32));
	spin_unlock_irq(&np->lock);
}

static int nv_nway_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int ret;

	if (np->autoneg) {
		int bmcr;

		netif_carrier_off(dev);
		if (netif_running(dev)) {
			nv_disable_irq(dev);
			netif_tx_lock_bh(dev);
			netif_addr_lock(dev);
			spin_lock(&np->lock);
			/* stop engines */
			nv_stop_rxtx(dev);
			spin_unlock(&np->lock);
			netif_addr_unlock(dev);
			netif_tx_unlock_bh(dev);
			printk(KERN_INFO "%s: link down.\n", dev->name);
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
			bmcr |= BMCR_ANENABLE;
			/* reset the phy in order for settings to stick*/
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
		}

		if (netif_running(dev)) {
			nv_start_rxtx(dev);
			nv_enable_irq(dev);
		}
		ret = 0;
	} else {
		ret = -EINVAL;
	}

	return ret;
}

static int nv_set_tso(struct net_device *dev, u32 value)
{
	struct fe_priv *np = netdev_priv(dev);

	if ((np->driver_data & DEV_HAS_CHECKSUM))
		return ethtool_op_set_tso(dev, value);
	else
		return -EOPNOTSUPP;
}

static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
{
	struct fe_priv *np = netdev_priv(dev);

	ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;

	ring->rx_pending = np->rx_ring_size;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
	ring->tx_pending = np->tx_ring_size;
}

static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
	dma_addr_t ring_addr;

	if (ring->rx_pending < RX_RING_MIN ||
	    ring->tx_pending < TX_RING_MIN ||
	    ring->rx_mini_pending != 0 ||
	    ring->rx_jumbo_pending != 0 ||
	    (np->desc_ver == DESC_VER_1 &&
	     (ring->rx_pending > RING_MAX_DESC_VER_1 ||
	      ring->tx_pending > RING_MAX_DESC_VER_1)) ||
	    (np->desc_ver != DESC_VER_1 &&
	     (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
	      ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
		return -EINVAL;
	}

	/* allocate new rings */
	if (!nv_optimized(np)) {
		rxtx_ring = pci_alloc_consistent(np->pci_dev,
					    sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
					    &ring_addr);
	} else {
		rxtx_ring = pci_alloc_consistent(np->pci_dev,
					    sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
					    &ring_addr);
	}
	rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
	tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
	if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
		/* fall back to old rings */
		if (!nv_optimized(np)) {
			if (rxtx_ring)
				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
						    rxtx_ring, ring_addr);
		} else {
			if (rxtx_ring)
				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
						    rxtx_ring, ring_addr);
		}
		if (rx_skbuff)
			kfree(rx_skbuff);
		if (tx_skbuff)
			kfree(tx_skbuff);
		goto exit;
	}

	if (netif_running(dev)) {
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rxtx(dev);
		nv_txrx_reset(dev);
		/* drain queues */
		nv_drain_rxtx(dev);
		/* delete queues */
		free_rings(dev);
	}

	/* set new values */
	np->rx_ring_size = ring->rx_pending;
	np->tx_ring_size = ring->tx_pending;

	if (!nv_optimized(np)) {
		np->rx_ring.orig = (struct ring_desc *)rxtx_ring;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
	np->rx_skb = (struct nv_skb_map *)rx_skbuff;
	np->tx_skb = (struct nv_skb_map *)tx_skbuff;
	np->ring_addr = ring_addr;

	memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
	memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);

	if (netif_running(dev)) {
		/* reinit driver view of the queues */
		set_bufsize(dev);
		if (nv_init_ring(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}

		/* reinit nic view of the queues */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
			base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);

		/* restart engines */
		nv_start_rxtx(dev);
		spin_unlock(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
		nv_enable_irq(dev);
	}
	return 0;
exit:
	return -ENOMEM;
}

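/*
 * Ring resizing above is ordered so that a failure cannot strand a running
 * interface: the new descriptor rings and skb maps are allocated first,
 * the engines are stopped and the old rings freed only once all
 * allocations have succeeded, and the nic view is reprogrammed before
 * traffic is restarted.
 */
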
static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
{
	struct fe_priv *np = netdev_priv(dev);

	pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
	pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
	pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
}

static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
{
	struct fe_priv *np = netdev_priv(dev);
	int adv, bmcr;

	if ((!np->autoneg && np->duplex == 0) ||
	    (np->autoneg && !pause->autoneg && np->duplex == 0)) {
		printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n",
		       dev->name);
		return -EINVAL;
	}
	if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
		printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name);
		return -EINVAL;
	}

	netif_carrier_off(dev);
	if (netif_running(dev)) {
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		netif_addr_lock(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rxtx(dev);
		spin_unlock(&np->lock);
		netif_addr_unlock(dev);
		netif_tx_unlock_bh(dev);
	}

	np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
	if (pause->rx_pause)
		np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
	if (pause->tx_pause)
		np->pause_flags |= NV_PAUSEFRAME_TX_REQ;

	if (np->autoneg && pause->autoneg) {
		np->pause_flags |= NV_PAUSEFRAME_AUTONEG;

		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisments but disable tx pause */
			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
			adv |=  ADVERTISE_PAUSE_ASYM;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

		if (netif_running(dev))
			printk(KERN_INFO "%s: link down.\n", dev->name);
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
	} else {
		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
		if (pause->rx_pause)
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		if (pause->tx_pause)
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;

		if (!netif_running(dev))
			nv_update_linkspeed(dev);
		else
			nv_update_pause(dev, np->pause_flags);
	}

	if (netif_running(dev)) {
		nv_start_rxtx(dev);
		nv_enable_irq(dev);
	}
	return 0;
}

static u32 nv_get_rx_csum(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	return (np->rx_csum) != 0;
}

static int nv_set_rx_csum(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int retcode = 0;

	if (np->driver_data & DEV_HAS_CHECKSUM) {
		if (data) {
			np->rx_csum = 1;
			np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		} else {
			np->rx_csum = 0;
			/* vlan is dependent on rx checksum offload */
			if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
				np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
		}
		if (netif_running(dev)) {
			spin_lock_irq(&np->lock);
			writel(np->txrxctl_bits, base + NvRegTxRxControl);
			spin_unlock_irq(&np->lock);
		}
	} else {
		return -EINVAL;
	}

	return retcode;
}

static int nv_set_tx_csum(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_CHECKSUM)
		return ethtool_op_set_tx_hw_csum(dev, data);
	else
		return -EOPNOTSUPP;
}

static int nv_set_sg(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_CHECKSUM)
		return ethtool_op_set_sg(dev, data);
	else
		return -EOPNOTSUPP;
}

static int nv_get_sset_count(struct net_device *dev, int sset)
{
	struct fe_priv *np = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_TEST:
		if (np->driver_data & DEV_HAS_TEST_EXTENDED)
			return NV_TEST_COUNT_EXTENDED;
		else
			return NV_TEST_COUNT_BASE;
	case ETH_SS_STATS:
		if (np->driver_data & DEV_HAS_STATISTICS_V1)
			return NV_DEV_STATISTICS_V1_COUNT;
		else if (np->driver_data & DEV_HAS_STATISTICS_V2)
			return NV_DEV_STATISTICS_V2_COUNT;
		else if (np->driver_data & DEV_HAS_STATISTICS_V3)
			return NV_DEV_STATISTICS_V3_COUNT;
		else
			return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
{
	struct fe_priv *np = netdev_priv(dev);

	/* update stats */
	nv_do_stats_poll((unsigned long)dev);

	memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
}

static int nv_link_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int mii_status;

	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

	/* check phy link status */
	if (!(mii_status & BMSR_LSTATUS))
		return 0;
	else
		return 1;
}

static int nv_register_test(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	int i = 0;
	u32 orig_read, new_read;

	do {
		orig_read = readl(base + nv_registers_test[i].reg);

		/* xor with mask to toggle bits */
		orig_read ^= nv_registers_test[i].mask;

		writel(orig_read, base + nv_registers_test[i].reg);

		new_read = readl(base + nv_registers_test[i].reg);

		if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
			return 0;

		/* restore original value */
		orig_read ^= nv_registers_test[i].mask;
		writel(orig_read, base + nv_registers_test[i].reg);

	} while (nv_registers_test[++i].reg != 0);

	return 1;
}

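/*
 * The register test relies on xor being its own inverse: toggling the
 * writable bits named in nv_registers_test[i].mask, reading the value
 * back, and comparing under the same mask detects stuck bits, while a
 * second xor with the mask restores the register to its original
 * contents.
 */
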
static int nv_interrupt_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int testcnt;
	u32 save_msi_flags, save_poll_interval = 0;

	if (netif_running(dev)) {
		/* free current irq */
		nv_free_irq(dev);
		save_poll_interval = readl(base+NvRegPollingInterval);
	}

	/* flag to test interrupt handler */
	np->intr_test = 0;

	/* setup test irq */
	save_msi_flags = np->msi_flags;
	np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
	np->msi_flags |= 0x001; /* setup 1 vector */
	if (nv_request_irq(dev, 1))
		return 0;

	/* setup timer interrupt */
	writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);

	nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);

	/* wait for at least one interrupt */
	msleep(100);

	spin_lock_irq(&np->lock);

	/* flag should be set within ISR */
	testcnt = np->intr_test;
	if (!testcnt)
		ret = 2;

	nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
	if (!(np->msi_flags & NV_MSI_X_ENABLED))
		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	else
		writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	np->msi_flags = save_msi_flags;

	if (netif_running(dev)) {
		writel(save_poll_interval, base + NvRegPollingInterval);
		writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
		/* restore original irq */
		if (nv_request_irq(dev, 0))
			return 0;
	}

	return ret;
}

static int nv_loopback_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	struct sk_buff *tx_skb, *rx_skb;
	dma_addr_t test_dma_addr;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	u32 flags;
	int len, i, pkt_len;
	u8 *pkt_data;
	u32 filter_flags = 0;
	u32 misc1_flags = 0;
	int ret = 1;

	if (netif_running(dev)) {
		nv_disable_irq(dev);
		filter_flags = readl(base + NvRegPacketFilterFlags);
		misc1_flags = readl(base + NvRegMisc1);
	} else {
		nv_txrx_reset(dev);
	}

	/* reinit driver view of the rx queue */
	set_bufsize(dev);
	nv_init_ring(dev);

	/* setup hardware for loopback */
	writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
	writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);

	/* reinit nic view of the rx queue */
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
	writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
		base + NvRegRingSizes);
	pci_push(base);

	/* restart rx engine */
	nv_start_rxtx(dev);

	/* setup packet for tx */
	pkt_len = ETH_DATA_LEN;
	tx_skb = dev_alloc_skb(pkt_len);
	if (!tx_skb) {
		printk(KERN_ERR "dev_alloc_skb() failed during loopback test"
			 " of %s\n", dev->name);
		ret = 0;
		goto out;
	}
	test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
				       skb_tailroom(tx_skb),
				       PCI_DMA_FROMDEVICE);
	pkt_data = skb_put(tx_skb, pkt_len);
	for (i = 0; i < pkt_len; i++)
		pkt_data[i] = (u8)(i & 0xff);

	if (!nv_optimized(np)) {
		np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
		np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
	} else {
		np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr));
		np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr));
		np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
	}
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	pci_push(get_hwbase(dev));

	msleep(500);

	/* check for rx of the packet */
	if (!nv_optimized(np)) {
		flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
		len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);

	} else {
		flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
		len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
	}

	if (flags & NV_RX_AVAIL) {
		ret = 0;
	} else if (np->desc_ver == DESC_VER_1) {
		if (flags & NV_RX_ERROR)
			ret = 0;
	} else {
		if (flags & NV_RX2_ERROR) {
			ret = 0;
		}
	}

	if (ret) {
		if (len != pkt_len) {
			ret = 0;
			dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
				dev->name, len, pkt_len);
		} else {
			rx_skb = np->rx_skb[0].skb;
			for (i = 0; i < pkt_len; i++) {
				if (rx_skb->data[i] != (u8)(i & 0xff)) {
					ret = 0;
					dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
						dev->name, i);
					break;
				}
			}
		}
	} else {
		dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
	}

	pci_unmap_page(np->pci_dev, test_dma_addr,
		       (skb_end_pointer(tx_skb) - tx_skb->data),
		       PCI_DMA_TODEVICE);
	dev_kfree_skb_any(tx_skb);
 out:
	/* stop engines */
	nv_stop_rxtx(dev);
	nv_txrx_reset(dev);
	/* drain rx queue */
	nv_drain_rxtx(dev);

	if (netif_running(dev)) {
		writel(misc1_flags, base + NvRegMisc1);
		writel(filter_flags, base + NvRegPacketFilterFlags);
		nv_enable_irq(dev);
	}

	return ret;
}

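/*
 * The loopback test above transmits a single ETH_DATA_LEN frame whose
 * payload byte i holds (u8)(i & 0xff), with the MAC looped back on itself
 * via NVREG_PFF_LOOPBACK; a pass requires the frame to come back with the
 * correct descriptor length and an intact byte pattern.
 */
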
static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int result;
	memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64));

	if (!nv_link_test(dev)) {
		test->flags |= ETH_TEST_FL_FAILED;
		buffer[0] = 1;
	}

	if (test->flags & ETH_TEST_FL_OFFLINE) {
		if (netif_running(dev)) {
			netif_stop_queue(dev);
#ifdef CONFIG_FORCEDETH_NAPI
			napi_disable(&np->napi);
#endif
			netif_tx_lock_bh(dev);
			netif_addr_lock(dev);
			spin_lock_irq(&np->lock);
			nv_disable_hw_interrupts(dev, np->irqmask);
			if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
			} else {
				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
			}
			/* stop engines */
			nv_stop_rxtx(dev);
			nv_txrx_reset(dev);
			/* drain rx queue */
			nv_drain_rxtx(dev);
			spin_unlock_irq(&np->lock);
			netif_addr_unlock(dev);
			netif_tx_unlock_bh(dev);
		}

		if (!nv_register_test(dev)) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[1] = 1;
		}

		result = nv_interrupt_test(dev);
		if (result != 1) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[2] = 1;
		}
		if (result == 0) {
			/* bail out */
			return;
		}

		if (!nv_loopback_test(dev)) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[3] = 1;
		}

		if (netif_running(dev)) {
			/* reinit driver view of the rx queue */
			set_bufsize(dev);
			if (nv_init_ring(dev)) {
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			}
			/* reinit nic view of the rx queue */
			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
			writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
				base + NvRegRingSizes);
			pci_push(base);
			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
			pci_push(base);
			/* restart rx engine */
			nv_start_rxtx(dev);
			netif_start_queue(dev);
#ifdef CONFIG_FORCEDETH_NAPI
			napi_enable(&np->napi);
#endif
			nv_enable_hw_interrupts(dev, np->irqmask);
		}
	}
}

static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str));
		break;
	case ETH_SS_TEST:
		memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str));
		break;
	}
}

static const struct ethtool_ops ops = {
	.get_drvinfo = nv_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_wol = nv_get_wol,
	.set_wol = nv_set_wol,
	.get_settings = nv_get_settings,
	.set_settings = nv_set_settings,
	.get_regs_len = nv_get_regs_len,
	.get_regs = nv_get_regs,
	.nway_reset = nv_nway_reset,
	.set_tso = nv_set_tso,
	.get_ringparam = nv_get_ringparam,
	.set_ringparam = nv_set_ringparam,
	.get_pauseparam = nv_get_pauseparam,
	.set_pauseparam = nv_set_pauseparam,
	.get_rx_csum = nv_get_rx_csum,
	.set_rx_csum = nv_set_rx_csum,
	.set_tx_csum = nv_set_tx_csum,
	.set_sg = nv_set_sg,
	.get_strings = nv_get_strings,
	.get_ethtool_stats = nv_get_ethtool_stats,
	.get_sset_count = nv_get_sset_count,
	.self_test = nv_self_test,
};

static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct fe_priv *np = get_nvpriv(dev);

	spin_lock_irq(&np->lock);

	/* save vlan group */
	np->vlangrp = grp;

	if (grp) {
		/* enable vlan on MAC */
		np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
	} else {
		/* disable vlan on MAC */
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
	}

	writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);

	spin_unlock_irq(&np->lock);
}

/* The mgmt unit and driver use a semaphore to access the phy during init */
static int nv_mgmt_acquire_sema(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	int i;
	u32 tx_ctrl, mgmt_sema;

	for (i = 0; i < 10; i++) {
		mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
		if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
			break;
		msleep(500);
	}

	if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
		return 0;

	for (i = 0; i < 2; i++) {
		tx_ctrl = readl(base + NvRegTransmitterControl);
		tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
		writel(tx_ctrl, base + NvRegTransmitterControl);

		/* verify that semaphore was acquired */
		tx_ctrl = readl(base + NvRegTransmitterControl);
		if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
		    ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE))
			return 1;
		else
			udelay(50);
	}

	return 0;
}

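/*
 * The handshake above first polls (up to ten times) for the management
 * unit to release its semaphore, then tries twice to latch the host
 * semaphore bit; ownership only counts if the readback shows the host bit
 * set while the mgmt bit is still free, otherwise the phy is left to the
 * management unit.
 */
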
static int nv_open(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int oom, i;
	u32 low;

	dprintk(KERN_DEBUG "nv_open: begin\n");

	/* erase previous misconfiguration */
	if (np->driver_data & DEV_HAS_POWER_CNTRL)
		nv_mac_reset(dev);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
	writel(0, base + NvRegPacketFilterFlags);

	writel(0, base + NvRegTransmitterControl);
	writel(0, base + NvRegReceiverControl);

	writel(0, base + NvRegAdapterControl);

	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
		writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);

	/* initialize descriptor rings */
	set_bufsize(dev);
	oom = nv_init_ring(dev);

	writel(0, base + NvRegLinkSpeed);
	writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
	nv_txrx_reset(dev);
	writel(0, base + NvRegUnknownSetupReg6);

	np->in_shutdown = 0;

	/* give hw rings */
	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
	writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
		base + NvRegRingSizes);

	writel(np->linkspeed, base + NvRegLinkSpeed);
	if (np->desc_ver == DESC_VER_1)
		writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
	else
		writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
	writel(np->txrxctl_bits, base + NvRegTxRxControl);
	writel(np->vlanctl_bits, base + NvRegVlanControl);
	pci_push(base);
	writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
	reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
			NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
			KERN_INFO "open: SetupReg5, Bit 31 remained off\n");

	writel(0, base + NvRegMIIMask);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);

	writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
	writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
	writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);

	writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);

	get_random_bytes(&low, sizeof(low));
	low &= NVREG_SLOTTIME_MASK;
	if (np->desc_ver == DESC_VER_1) {
		writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime);
	} else {
		if (!(np->driver_data & DEV_HAS_GEAR_MODE)) {
			/* setup legacy backoff */
			writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime);
		} else {
			writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime);
			nv_gear_backoff_reseed(dev);
		}
	}
	writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
	writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
	if (poll_interval == -1) {
		if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
			writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
		else
			writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	} else
		writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
			base + NvRegAdapterControl);
	writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
	writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
	if (np->wolenabled)
		writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);

	i = readl(base + NvRegPowerState);
	if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0)
		writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);

	pci_push(base);
	udelay(10);
	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);

	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	pci_push(base);

	if (nv_request_irq(dev, 0)) {
		goto out_drain;
	}

	/* ask for interrupts */
	nv_enable_hw_interrupts(dev, np->irqmask);

	spin_lock_irq(&np->lock);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
	writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
	/* One manual link speed update: Interrupts are enabled, future link
	 * speed changes cause interrupts and are handled by nv_link_irq().
	 */
	{
		u32 miistat;
		miistat = readl(base + NvRegMIIStatus);
		writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
		dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
	}
	/* set linkspeed to invalid value, thus force nv_update_linkspeed
	 * to init hw */
	np->linkspeed = 0;
	ret = nv_update_linkspeed(dev);
	nv_start_rxtx(dev);
	netif_start_queue(dev);
#ifdef CONFIG_FORCEDETH_NAPI
	napi_enable(&np->napi);
#endif

	if (ret) {
		netif_carrier_on(dev);
	} else {
		printk(KERN_INFO "%s: no link during initialization.\n", dev->name);
		netif_carrier_off(dev);
	}
	if (oom)
		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);

	/* start statistics timer */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
		mod_timer(&np->stats_poll,
			round_jiffies(jiffies + STATS_INTERVAL));

	spin_unlock_irq(&np->lock);

	return 0;
out_drain:
	nv_drain_rxtx(dev);
	return ret;
}

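/*
 * nv_open() brings the hardware up in a fixed order: wipe any stale
 * configuration, allocate and publish the descriptor rings, program link,
 * watermark and polling registers, request the irq, and only then unmask
 * interrupts and kick one manual link-speed update so the first link
 * state is known before nv_link_irq() takes over.
 */
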
static int nv_close(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base;

	spin_lock_irq(&np->lock);
	np->in_shutdown = 1;
	spin_unlock_irq(&np->lock);
#ifdef CONFIG_FORCEDETH_NAPI
	napi_disable(&np->napi);
#endif
	synchronize_irq(np->pci_dev->irq);

	del_timer_sync(&np->oom_kick);
	del_timer_sync(&np->nic_poll);
	del_timer_sync(&np->stats_poll);

	netif_stop_queue(dev);
	spin_lock_irq(&np->lock);
	nv_stop_rxtx(dev);
	nv_txrx_reset(dev);

	/* disable interrupts on the nic or we will lock up */
	base = get_hwbase(dev);
	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	nv_drain_rxtx(dev);

	if (np->wolenabled) {
		writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
		nv_start_rx(dev);
	}

	/* FIXME: power down nic */

	return 0;
}

static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
	struct net_device *dev;
	struct fe_priv *np;
	unsigned long addr;
	u8 __iomem *base;
	int err, i;
	u32 powerstate, txreg;
	u32 phystate_orig = 0, phystate;
	int phyinitialized = 0;
	DECLARE_MAC_BUF(mac);
	static int printed_version;

	if (!printed_version++)
		printk(KERN_INFO "%s: Reverse Engineered nForce ethernet"
		       " driver. Version %s.\n", DRV_NAME, FORCEDETH_VERSION);

	dev = alloc_etherdev(sizeof(struct fe_priv));
	err = -ENOMEM;
	if (!dev)
		goto out;

	np = netdev_priv(dev);
	np->dev = dev;
	np->pci_dev = pci_dev;
	spin_lock_init(&np->lock);
	SET_NETDEV_DEV(dev, &pci_dev->dev);

	init_timer(&np->oom_kick);
	np->oom_kick.data = (unsigned long) dev;
	np->oom_kick.function = &nv_do_rx_refill;	/* timer handler */
	init_timer(&np->nic_poll);
	np->nic_poll.data = (unsigned long) dev;
	np->nic_poll.function = &nv_do_nic_poll;	/* timer handler */
	init_timer(&np->stats_poll);
	np->stats_poll.data = (unsigned long) dev;
	np->stats_poll.function = &nv_do_stats_poll;	/* timer handler */

	err = pci_enable_device(pci_dev);
	if (err)
		goto out_free;

	pci_set_master(pci_dev);

	err = pci_request_regions(pci_dev, DRV_NAME);
	if (err < 0)
		goto out_disable;

	if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
		np->register_size = NV_PCI_REGSZ_VER3;
	else if (id->driver_data & DEV_HAS_STATISTICS_V1)
		np->register_size = NV_PCI_REGSZ_VER2;
	else
		np->register_size = NV_PCI_REGSZ_VER1;

	err = -EINVAL;
	addr = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
				pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
				pci_resource_len(pci_dev, i),
				pci_resource_flags(pci_dev, i));
		if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
				pci_resource_len(pci_dev, i) >= np->register_size) {
			addr = pci_resource_start(pci_dev, i);
			break;
		}
	}
	if (i == DEVICE_COUNT_RESOURCE) {
		dev_printk(KERN_INFO, &pci_dev->dev,
			   "Couldn't find register window\n");
		goto out_relreg;
	}

	/* copy of driver data */
	np->driver_data = id->driver_data;
	/* copy of device id */
	np->device_id = id->device;

	/* handle different descriptor versions */
	if (id->driver_data & DEV_HAS_HIGH_DMA) {
		/* packet format 3: supports 40-bit addressing */
		np->desc_ver = DESC_VER_3;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
		if (dma_64bit) {
			if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK))
				dev_printk(KERN_INFO, &pci_dev->dev,
					"64-bit DMA failed, using 32-bit addressing\n");
			else
				dev->features |= NETIF_F_HIGHDMA;
			if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) {
				dev_printk(KERN_INFO, &pci_dev->dev,
					"64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
			}
		}
	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
		/* packet format 2: supports jumbo frames */
		np->desc_ver = DESC_VER_2;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
	} else {
		/* original packet format */
		np->desc_ver = DESC_VER_1;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
	}

	np->pkt_limit = NV_PKTLIMIT_1;
	if (id->driver_data & DEV_HAS_LARGEDESC)
		np->pkt_limit = NV_PKTLIMIT_2;

	if (id->driver_data & DEV_HAS_CHECKSUM) {
		np->rx_csum = 1;
		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
		dev->features |= NETIF_F_TSO;
	}

	np->vlanctl_bits = 0;
	if (id->driver_data & DEV_HAS_VLAN) {
		np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
		dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
		dev->vlan_rx_register = nv_vlan_rx_register;
	}

	np->msi_flags = 0;
	if ((id->driver_data & DEV_HAS_MSI) && msi) {
		np->msi_flags |= NV_MSI_CAPABLE;
	}
	if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
		np->msi_flags |= NV_MSI_X_CAPABLE;
	}

	np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
	if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
	}

	err = -ENOMEM;
	np->base = ioremap(addr, np->register_size);
	if (!np->base)
		goto out_relreg;
	dev->base_addr = (unsigned long)np->base;

	dev->irq = pci_dev->irq;

	np->rx_ring_size = RX_RING_DEFAULT;
	np->tx_ring_size = TX_RING_DEFAULT;

	if (!nv_optimized(np)) {
		np->rx_ring.orig = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.orig)
			goto out_unmap;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.ex)
			goto out_unmap;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
	np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
	np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
	if (!np->rx_skb || !np->tx_skb)
		goto out_freering;

	dev->open = nv_open;
	dev->stop = nv_close;

	if (!nv_optimized(np))
		dev->hard_start_xmit = nv_start_xmit;
	else
		dev->hard_start_xmit = nv_start_xmit_optimized;
	dev->get_stats = nv_get_stats;
	dev->change_mtu = nv_change_mtu;
	dev->set_mac_address = nv_set_mac_address;
	dev->set_multicast_list = nv_set_multicast;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = nv_poll_controller;
#endif
#ifdef CONFIG_FORCEDETH_NAPI
	netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
#endif
	SET_ETHTOOL_OPS(dev, &ops);
	dev->tx_timeout = nv_tx_timeout;
	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;

	pci_set_drvdata(pci_dev, dev);

	/* read the mac address */
	base = get_hwbase(dev);
	np->orig_mac[0] = readl(base + NvRegMacAddrA);
	np->orig_mac[1] = readl(base + NvRegMacAddrB);

	/* check the workaround bit for correct mac address order */
	txreg = readl(base + NvRegTransmitPoll);
	if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
		/* mac address is already in correct order */
		dev->dev_addr[0] = (np->orig_mac[0] >>  0) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[0] >>  8) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[1] >>  0) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[1] >>  8) & 0xff;
	} else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
		/* mac address is already in correct order */
		dev->dev_addr[0] = (np->orig_mac[0] >>  0) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[0] >>  8) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[1] >>  0) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[1] >>  8) & 0xff;
		/*
		 * Set orig mac address back to the reversed version.
		 * This flag will be cleared during low power transition.
		 * Therefore, we should always put back the reversed address.
		 */
		np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
			(dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24);
		np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
	} else {
		/* need to reverse mac address to correct order */
		dev->dev_addr[0] = (np->orig_mac[1] >>  8) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[1] >>  0) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[0] >>  8) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[0] >>  0) & 0xff;
		writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
	}

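	/*
	 * Byte-order example for the reversal above: a nic that stored
	 * np->orig_mac[0] = 0xddccbbaa and np->orig_mac[1] = 0x0000ffee
	 * without the MAC_ADDR_REV workaround bit yields the address
	 * ff:ee:dd:cc:bb:aa, i.e. the low byte of orig_mac[1] ends up last.
	 */
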
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	if (!is_valid_ether_addr(dev->perm_addr)) {
		/*
		 * Bad mac address. At least one bios sets the mac address
		 * to 01:23:45:67:89:ab
		 */
		dev_printk(KERN_ERR, &pci_dev->dev,
			"Invalid Mac address detected: %s\n",
			print_mac(mac, dev->dev_addr));
		dev_printk(KERN_ERR, &pci_dev->dev,
			"Please complain to your hardware vendor. Switching to a random MAC.\n");
		dev->dev_addr[0] = 0x00;
		dev->dev_addr[1] = 0x00;
		dev->dev_addr[2] = 0x6c;
		get_random_bytes(&dev->dev_addr[3], 3);
	}

	dprintk(KERN_DEBUG "%s: MAC Address %s\n",
		pci_name(pci_dev), print_mac(mac, dev->dev_addr));

	/* set mac address */
	nv_copy_mac_to_hw(dev);

	/* Workaround current PCI init glitch: wakeup bits aren't
	 * being set from PCI PM capability.
	 */
	device_init_wakeup(&pci_dev->dev, 1);

	/* disable WOL */
	writel(0, base + NvRegWakeUpFlags);
	np->wolenabled = 0;

	if (id->driver_data & DEV_HAS_POWER_CNTRL) {

		/* take phy and nic out of low power mode */
		powerstate = readl(base + NvRegPowerState2);
		powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
		if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
		    pci_dev->revision >= 0xA3)
			powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
		writel(powerstate, base + NvRegPowerState2);
	}

	if (np->desc_ver == DESC_VER_1) {
		np->tx_flags = NV_TX_VALID;
	} else {
		np->tx_flags = NV_TX2_VALID;
	}
	if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0003;
	} else {
		np->irqmask = NVREG_IRQMASK_CPU;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0001;
	}

	if (id->driver_data & DEV_NEED_TIMERIRQ)
		np->irqmask |= NVREG_IRQ_TIMER;
	if (id->driver_data & DEV_NEED_LINKTIMER) {
		dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
		np->need_linktimer = 1;
		np->link_timeout = jiffies + LINK_TIMEOUT;
	} else {
		dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
		np->need_linktimer = 0;
	}

	/* Limit the number of tx's outstanding for hw bug */
	if (id->driver_data & DEV_NEED_TX_LIMIT) {
		np->tx_limit = 1;
		if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_39) &&
		    pci_dev->revision >= 0xA2)
			np->tx_limit = 0;
	}

	/* clear phy state and temporarily halt phy interrupts */
	writel(0, base + NvRegMIIMask);
	phystate = readl(base + NvRegAdapterControl);
	if (phystate & NVREG_ADAPTCTL_RUNNING) {
		phystate_orig = 1;
		phystate &= ~NVREG_ADAPTCTL_RUNNING;
		writel(phystate, base + NvRegAdapterControl);
	}
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);

	if (id->driver_data & DEV_HAS_MGMT_UNIT) {
		/* management unit running on the mac? */
		if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) {
			np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
			dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use);
			if (nv_mgmt_acquire_sema(dev)) {
				/* management unit setup the phy already? */
				if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
				    NVREG_XMITCTL_SYNC_PHY_INIT) {
					/* phy is inited by mgmt unit */
					phyinitialized = 1;
					dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev));
				} else {
					/* we need to init the phy */
				}
			}
		}
	}

	/* find a suitable phy */
	for (i = 1; i <= 32; i++) {
		int id1, id2;
		int phyaddr = i & 0x1F;

		spin_lock_irq(&np->lock);
		id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id1 < 0 || id1 == 0xffff)
			continue;
		spin_lock_irq(&np->lock);
		id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id2 < 0 || id2 == 0xffff)
			continue;

		np->phy_model = id2 & PHYID2_MODEL_MASK;
		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
		dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
			pci_name(pci_dev), id1, id2, phyaddr);
		np->phyaddr = phyaddr;
		np->phy_oui = id1 | id2;

		/* Realtek hardcoded phy id1 to all zero's on certain phys */
		if (np->phy_oui == PHY_OUI_REALTEK2)
			np->phy_oui = PHY_OUI_REALTEK;
		/* Setup phy revision for Realtek */
		if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211)
			np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK;

		break;
	}
	if (i == 33) {
		dev_printk(KERN_INFO, &pci_dev->dev,
			   "open: Could not find a valid PHY.\n");
		goto out_error;
	}

	if (!phyinitialized) {
		/* reset it */
		phy_init(dev);
	} else {
		/* see if it is a gigabit phy */
		u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
		if (mii_status & PHY_GIGABIT) {
			np->gigabit = PHY_GIGABIT;
		}
	}

	/* set default link speed settings */
	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
	np->duplex = 0;
	np->autoneg = 1;

	err = register_netdev(dev);
	if (err) {
		dev_printk(KERN_INFO, &pci_dev->dev,
			   "unable to register netdev: %d\n", err);
		goto out_error;
	}

	dev_printk(KERN_INFO, &pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, "
		   "addr %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
		   dev->name, np->phy_oui, np->phyaddr,
		   dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
		   dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);

	dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
		   dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
		   dev->features & (NETIF_F_HW_CSUM | NETIF_F_SG) ?
			"csum " : "",
		   dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
			"vlan " : "",
		   id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
		   id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
		   id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
		   np->gigabit == PHY_GIGABIT ? "gbit " : "",
		   np->need_linktimer ? "lnktim " : "",
		   np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
		   np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
		   np->desc_ver);

	return 0;
out_error:
	if (phystate_orig)
		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
	pci_set_drvdata(pci_dev, NULL);
out_freering:
	free_rings(dev);
out_unmap:
	iounmap(get_hwbase(dev));
out_relreg:
	pci_release_regions(pci_dev);
out_disable:
	pci_disable_device(pci_dev);
out_free:
	free_netdev(dev);
out:
	return err;
}
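/* The label ladder above unwinds nv_probe() in reverse order of acquisition:
 * each failure site jumps to the label matching the last successful step and
 * falls through the remaining cleanups.
 */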
static void nv_restore_phy(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u16 phy_reserved, mii_control;

	if (np->phy_oui == PHY_OUI_REALTEK &&
	    np->phy_model == PHY_MODEL_REALTEK_8201 &&
	    phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3);
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
		phy_reserved |= PHY_REALTEK_INIT8;
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved);
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1);

		/* restart auto negotiation */
		mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
		mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
	}
}
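/* nv_restore_phy() exists so that unloading the driver with
 * phy_cross=NV_CROSSOVER_DETECTION_DISABLED leaves the Realtek 8201 in what
 * appears to be its power-on configuration, with autonegotiation restarted
 * for whatever touches the PHY next.
 */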
static void __devexit nv_remove(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	unregister_netdev(dev);

	/* special op: write back the misordered MAC address - otherwise
	 * the next nv_probe would see a wrong address.
	 */
	writel(np->orig_mac[0], base + NvRegMacAddrA);
	writel(np->orig_mac[1], base + NvRegMacAddrB);
	writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
	       base + NvRegTransmitPoll);

	/* restore any phy related changes */
	nv_restore_phy(dev);

	/* free all structures */
	free_rings(dev);
	iounmap(get_hwbase(dev));
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
	free_netdev(dev);
	pci_set_drvdata(pci_dev, NULL);
}
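/* Teardown mirrors probe in reverse: the netdev is unregistered first so no
 * new I/O can be started, hardware state (MAC address order, PHY tweaks) is
 * then undone, and only afterwards are the software resources released.
 */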
static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int i;

	if (netif_running(dev)) {
		/* gracefully take the device down */
		nv_close(dev);
	}
	netif_device_detach(dev);

	/* save non-pci configuration space */
	for (i = 0; i <= np->register_size/sizeof(u32); i++)
		np->saved_config_space[i] = readl(base + i*sizeof(u32));

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
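/* The loop above snapshots the whole memory-mapped register window into
 * np->saved_config_space; pci_save_state() only covers PCI config space, and
 * the D3 power states do not preserve the NIC's own registers, so nv_resume()
 * replays this snapshot word by word.
 */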
static int nv_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int i, rc = 0;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, PCI_D0, 0);

	/* restore non-pci configuration space */
	for (i = 0; i <= np->register_size/sizeof(u32); i++)
		writel(np->saved_config_space[i], base + i*sizeof(u32));

	netif_device_attach(dev);
	if (netif_running(dev)) {
		rc = nv_open(dev);
		nv_set_multicast(dev);
	}
	return rc;
}
static void nv_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);

	if (netif_running(dev))
		nv_close(dev);

	pci_enable_wake(pdev, PCI_D3hot, np->wolenabled);
	pci_enable_wake(pdev, PCI_D3cold, np->wolenabled);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
}
#else
#define nv_suspend NULL
#define nv_shutdown NULL
#define nv_resume NULL
#endif /* CONFIG_PM */
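/* Without CONFIG_PM the suspend/resume/shutdown entry points compile away to
 * NULL, so the pci_driver below simply registers no power management hooks.
 */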
static struct pci_device_id pci_tbl[] = {
	{	/* nForce Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce2 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{0,},
};
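/* Each entry's driver_data is the DEV_* capability bitmask that nv_probe()
 * consults when configuring the device, so supporting a new chip is mostly a
 * matter of appending a PCI_DEVICE() line with the right combination of flags.
 */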
static struct pci_driver driver = {
	.name		= DRV_NAME,
	.id_table	= pci_tbl,
	.probe		= nv_probe,
	.remove		= __devexit_p(nv_remove),
	.suspend	= nv_suspend,
	.resume		= nv_resume,
	.shutdown	= nv_shutdown,
};
static int __init init_nic(void)
{
	return pci_register_driver(&driver);
}

static void __exit exit_nic(void)
{
	pci_unregister_driver(&driver);
}
module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "Interval determines how frequently the timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(msix, int, 0);
MODULE_PARM_DESC(msix, "MSI-X interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_cross, int, 0);
MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(pci, pci_tbl);

module_init(init_nic);
module_exit(exit_nic);