/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)		\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)		\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
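
/* Usage sketch (illustrative, not part of the original source): the macros
 * token-paste the short name onto TG3_FLAG_, so callers write e.g.
 *
 *	if (tg3_flag(tp, TAGGED_STATUS))
 *		tg3_flag_clear(tp, TAGGED_STATUS);
 *
 * which expands to test_bit()/clear_bit() on tp->tg3_flags.  Routing every
 * access through the typed inline helpers above makes the compiler reject
 * anything that is not an enum TG3_FLAGS value.
 */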
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			119
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 18, 2011"
#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)
/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
147 #define TG3_RX_STD_RING_BYTES(tp) \
148 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
149 #define TG3_RX_JMB_RING_BYTES(tp) \
150 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
151 #define TG3_RX_RCB_RING_BYTES(tp) \
152 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
153 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
155 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
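
/* Illustrative note (not in the original): NEXT_TX is exactly the
 * '& (foo - 1)' trick described in the comment above.  It is only correct
 * because TG3_TX_RING_SIZE (512) is a power of two; e.g.
 * NEXT_TX(511) == (512 & 511) == 0 wraps back to the start of the ring.
 */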
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },
	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rx_threshold_hit" },
	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },
	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },
	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
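
/* A rough guide to the accessors above (descriptive summary, not in the
 * original source): tw32()/tr32() go through function pointers selected at
 * probe time (direct, indirect, or workaround variants); tw32_f() posts the
 * write and immediately reads it back to flush it; tw32_wait_f() also
 * delays, for registers such as GRC_LOCAL_CTRL and TG3PCI_CLOCK_CTRL where
 * a read-back alone is not safe (see the comment before _tw32_flush()).
 */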
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++) {
		if (i == TG3_APE_LOCK_GPIO)
			continue;
		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
	}

	/* Clear the correct bit of the GPIO lock too. */
	if (!tp->pci_fn)
		bit = APE_LOCK_GRANT_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
		bit = APE_LOCK_REQ_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
		bit = APE_LOCK_GRANT_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
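
/* Usage sketch (illustrative, not from the original source): APE lock
 * regions are always bracketed, mirroring tg3_pwrsrc_switch_to_vmain()
 * further below:
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
 *		return -EIO;
 *	... touch hardware shared with the APE firmware ...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
 */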
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
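
/* Descriptive note (summary of the sequence above, not in the original):
 * this is the standard clause-45-over-clause-22 MMD access method.
 * MII_TG3_MMD_CTRL first selects the device address (function = address),
 * the target register is written through MII_TG3_MMD_ADDRESS, the control
 * register is then switched to data/no-post-increment mode, and the final
 * access to MII_TG3_MMD_ADDRESS moves the data itself.
 */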
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)
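
/* Callers treat these as a bracket pair: if TG3_PHY_AUXCTL_SMDSP_ENABLE()
 * returns zero, the DSP registers may be written and the block must end
 * with TG3_PHY_AUXCTL_SMDSP_DISABLE() - see tg3_phy_apply_otp() below for
 * the canonical pattern.  (Descriptive note, not in the original; the
 * stray trailing semicolon on the DISABLE macro was also dropped here so
 * the macro behaves as a normal expression statement.)
 */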
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
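
/* Descriptive note (not in the original): the '(delay_cnt >> 3) + 1'
 * above converts the remaining time into a count of 8 usec polling steps,
 * so each loop iteration pairs one status check with a matching udelay(8).
 */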
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	val = 0;
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
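
/* The mapping implemented above (descriptive summary, not in the
 * original), following the 802.3 pause advertisement rules:
 *
 *	FLOW_CTRL_TX | FLOW_CTRL_RX -> ADVERTISE_PAUSE_CAP
 *	FLOW_CTRL_TX only           -> ADVERTISE_PAUSE_ASYM
 *	FLOW_CTRL_RX only           -> ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM
 *	neither                     -> 0
 */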
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
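
/* Resolution table for the logic above (descriptive summary, not in the
 * original), local advertisement vs. link partner advertisement:
 *
 *	lcl PAUSE  lcl ASYM  rmt PAUSE  rmt ASYM  =>  resolved
 *	    1          x         1         x          TX | RX
 *	    1          1         0         1          RX only
 *	    0          1         1         1          TX only
 *	    otherwise                                 none
 */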
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}

static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}

static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up == 1 &&
		    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}

static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
		if (err)
			return err;

		/* Block the PHY control access. */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);

	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}

/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames.
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					     val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}

#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
					  TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))
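/* Each PCI function owns a nibble in the GPIO message word: bits
 * [3:0] belong to function 0, [7:4] to function 1, and so on.  The
 * ALL_* masks above simply replicate one flag across all four
 * nibbles, so a single AND tests every function at once, e.g.:
 *
 *	if (status & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
 *		... at least one function still wants Vaux ...
 */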
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
	u32 status, shift;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
	else
		status = tr32(TG3_CPMU_DRV_STATUS);

	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
	status &= ~(TG3_GPIO_MSG_MASK << shift);
	status |= (newstat << shift);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
	else
		tw32(TG3_CPMU_DRV_STATUS, status);

	return status >> TG3_APE_GPIO_MSG_SHIFT;
}
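/* A worked example of the shift arithmetic above, assuming
 * TG3_APE_GPIO_MSG_SHIFT is 4 (its actual value lives in tg3.h):
 * for pci_fn == 2 the function's field starts at bit 4 + 4 * 2 = 12,
 * so the two TG3_GPIO_MSG_MASK bits at [13:12] are cleared, newstat
 * is OR'd in at the same position, and the whole word is written
 * back before the per-function view is returned to the caller.
 */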
static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}
static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}
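/* The three writes above pulse GPIO1 high, low, then high again,
 * waiting out the power-switch settle delay after each step.  GPIO1
 * is left driven high, which parks the board on Vmain with the
 * auxiliary supply switched off.
 */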
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
	u32 msg = 0;

	/* Serialize power state transitions */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
		return;

	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
		msg = TG3_GPIO_MSG_NEED_VAUX;

	msg = tg3_set_function_status(tp, msg);

	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
		goto done;

	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);

done:
	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
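/* Decision summary for the legacy (pre-5717) path above: auxiliary
 * power is selected when this port or its peer still needs power
 * while the host sleeps (WoL enabled and requested, or ASF
 * management firmware running); otherwise the board reverts to
 * Vmain and is allowed to lose power with it.
 */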
static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}
static int tg3_setup_phy(struct tg3 *, int);

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
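/* The NVRAM software arbitration used above is a hardware
 * semaphore: SWARB_REQ_SET1 posts the request, SWARB_GNT1 signals
 * the grant, and the request is withdrawn with SWARB_REQ_CLR1 on
 * timeout or in tg3_nvram_unlock() below.  nvram_lock_cnt makes the
 * lock recursive for nested callers already holding tp->lock.
 */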
/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}
/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}
/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
#define NVRAM_CMD_TIMEOUT 10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}
static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}
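/* Worked example of the two Atmel AT45DB translations above,
 * assuming the usual 264-byte page size and
 * ATMEL_AT45DB0X1B_PAGE_POS == 9 (both set elsewhere in the
 * driver): linear address 1000 falls in page 3 at offset 208
 * (1000 = 3 * 264 + 208), so the physical address is
 * (3 << 9) + 208 = 1744.  tg3_nvram_logical_addr() inverts this:
 * (1744 >> 9) * 264 + (1744 & 511) = 792 + 208 = 1000.
 */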
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);
	if (!res)
		*val = cpu_to_be32(v);
	return res;
}
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
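/* Example of the register packing above: for MAC address
 * 00:10:18:aa:bb:cc, addr_high ends up 0x0010 (bytes 0-1) and
 * addr_low 0x18aabbcc (bytes 2-5).  The same pair is mirrored into
 * four MAC_ADDR slots (and twelve MAC_EXTADDR slots on 5703/5704)
 * so the chip matches the address in every filter entry, and the
 * byte sum seeds the transmit backoff generator.
 */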
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
static int tg3_power_up(struct tg3 *tp)
{
	int err;

	tg3_enable_register_access(tp);

	err = pci_set_power_state(tp->pdev, PCI_D0);
	if (!err) {
		/* Switch out of Vaux if it is a NIC */
		tg3_pwrsrc_switch_to_vmain(tp);
	} else {
		netdev_err(tp->dev, "Transition to D0 failed\n");
	}

	return err;
}
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 lnkctl;

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				      lnkctl);
	}

	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			tp->link_config.orig_speed = phydev->speed;
			tp->link_config.orig_duplex = phydev->duplex;
			tp->link_config.orig_autoneg = phydev->autoneg;
			tp->link_config.orig_advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
			tp->link_config.orig_speed = tp->link_config.speed;
			tp->link_config.orig_duplex = tp->link_config.duplex;
			tp->link_config.orig_autoneg = tp->link_config.autoneg;
		}

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			tp->link_config.speed = SPEED_10;
			tp->link_config.duplex = DUPLEX_HALF;
			tp->link_config.autoneg = AUTONEG_ENABLE;
			tg3_setup_phy(tp, 0);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val,
					 u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
		*speed = SPEED_INVALID;
		*duplex = DUPLEX_INVALID;
		break;
	}
}
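/* Example: an aux status word whose SPDMASK field decodes to
 * MII_TG3_AUX_STAT_100FULL yields *speed = SPEED_100 and
 * *duplex = DUPLEX_FULL; any unrecognized encoding falls through to
 * the FET-style two-bit decode or to SPEED_INVALID/DUPLEX_INVALID.
 */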
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	if (advertise & ADVERTISED_10baseT_Half)
		new_adv |= ADVERTISE_10HALF;
	if (advertise & ADVERTISED_10baseT_Full)
		new_adv |= ADVERTISE_10FULL;
	if (advertise & ADVERTISED_100baseT_Half)
		new_adv |= ADVERTISE_100HALF;
	if (advertise & ADVERTISED_100baseT_Full)
		new_adv |= ADVERTISE_100FULL;

	new_adv |= tg3_advert_flowctrl_1000T(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
		goto done;

	new_adv = 0;
	if (advertise & ADVERTISED_1000baseT_Half)
		new_adv |= ADVERTISE_1000HALF;
	if (advertise & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000FULL;

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
		new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

	err = tg3_writephy(tp, MII_CTRL1000, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val) {
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
				tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			}
			/* Fall through */
		case ASIC_REV_5720:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		if (!err)
			err = err2;
	}

done:
	return err;
}
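/* Note on the EEE block above: the EEE advertisement register is
 * not an ordinary Clause 22 MII register, so it is written through
 * the Clause 45 indirection helper (tg3_phy_cl45_write) in the AN
 * MMD at MDIO_AN_EEE_ADV, and the DSP fixups are bracketed by the
 * SMDSP enable/disable macros that gate access to those registers.
 */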
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		new_adv = ADVERTISED_10baseT_Half |
			  ADVERTISED_10baseT_Full;
		if (tg3_flag(tp, WOL_SPEED_100MB))
			new_adv |= ADVERTISED_100baseT_Half |
				   ADVERTISED_100baseT_Full;

		tg3_phy_autoneg_cfg(tp, new_adv,
				    FLOW_CTRL_TX | FLOW_CTRL_RX);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
				    tp->link_config.flowctrl);
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_1000baseT_Full;
			else
				new_adv = ADVERTISED_1000baseT_Half;
		} else if (tp->link_config.speed == SPEED_100) {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_100baseT_Full;
			else
				new_adv = ADVERTISED_100baseT_Half;
		} else {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_10baseT_Full;
			else
				new_adv = ADVERTISED_10baseT_Half;
		}

		tg3_phy_autoneg_cfg(tp, new_adv,
				    tp->link_config.flowctrl);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}
static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
{
	u32 adv_reg, all_mask = 0;

	if (mask & ADVERTISED_10baseT_Half)
		all_mask |= ADVERTISE_10HALF;
	if (mask & ADVERTISED_10baseT_Full)
		all_mask |= ADVERTISE_10FULL;
	if (mask & ADVERTISED_100baseT_Half)
		all_mask |= ADVERTISE_100HALF;
	if (mask & ADVERTISED_100baseT_Full)
		all_mask |= ADVERTISE_100FULL;

	if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
		return 0;

	if ((adv_reg & all_mask) != all_mask)
		return 0;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		all_mask = 0;
		if (mask & ADVERTISED_1000baseT_Half)
			all_mask |= ADVERTISE_1000HALF;
		if (mask & ADVERTISED_1000baseT_Full)
			all_mask |= ADVERTISE_1000FULL;

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return 0;

		if ((tg3_ctrl & all_mask) != all_mask)
			return 0;
	}
	return 1;
}
static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
{
	u32 curadv, reqadv;

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return 1;

	curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
	reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		if (curadv != reqadv)
			return 0;

		if (tg3_flag(tp, PAUSE_AUTONEG))
			tg3_readphy(tp, MII_LPA, rmtadv);
	} else {
		/* Reprogram the advertisement register, even if it
		 * does not affect the current link.  If the link
		 * gets renegotiated in the future, we can save an
		 * additional renegotiation cycle by advertising
		 * it correctly in the first place.
		 */
		if (curadv != reqadv) {
			*lcladv &= ~(ADVERTISE_PAUSE_CAP |
				     ADVERTISE_PAUSE_ASYM);
			tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
		}
	}

	return 1;
}
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	       (MAC_STATUS_SYNC_CHANGED |
		MAC_STATUS_CFG_CHANGED |
		MAC_STATUS_MI_COMPLETION |
		MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising)) {
				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
							      &rmt_adv))
					current_link_up = 1;
			}
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL)
			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	}

relink:
	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = 1;
	}

	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		       (MAC_STATUS_SYNC_CHANGED |
			MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 oldlnkctl, newlnkctl;

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &oldlnkctl);
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
		else
			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
		if (newlnkctl != oldlnkctl)
			pci_write_config_word(tp->pdev,
					      pci_pcie_cap(tp->pdev) +
					      PCI_EXP_LNKCTL, newlnkctl);
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06
};

#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000
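/* Normal progression of the software autoneg state machine that
 * consumes these states (one step per tg3_fiber_aneg_smachine()
 * call):
 *
 *   AN_ENABLE -> RESTART_INIT -> RESTART -> ABILITY_DETECT_INIT
 *     -> ABILITY_DETECT -> ACK_DETECT_INIT -> ACK_DETECT
 *     -> COMPLETE_ACK_INIT -> COMPLETE_ACK -> IDLE_DETECT_INIT
 *     -> IDLE_DETECT -> LINK_OK
 *
 * Losing the partner's config code words from most states drops the
 * machine back to AN_ENABLE and the exchange starts over.
 */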
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
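/* The polling loop above steps the state machine up to 195000 times
 * with a 1 usec delay per tick, bounding a software autoneg attempt
 * at roughly 195 msec before the result captured in aninfo is
 * evaluated.
 */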
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!tg3_flag(tp, HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	       (MAC_STATUS_SYNC_CHANGED |
		MAC_STATUS_CFG_CHANGED |
		MAC_STATUS_MI_COMPLETION |
		MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				     MII_TG3_DSP_EXP1_INT_STAT);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}
}
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	u32 i;

	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}
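
/*
 * Indexing example for the helper above: tg3_rd32_loop(tp, regs, MAC_MODE,
 * 0x4f0) first advances dst by MAC_MODE bytes, so the word read from
 * register MAC_MODE + i lands at regs[(MAC_MODE + i) / sizeof(u32)].  The
 * dump buffer therefore mirrors the device's register address map, and
 * gaps between the copied blocks simply stay zero-filled.
 */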
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs) {
		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
		return;
	}

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			   "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
			   "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();

	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
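
/*
 * Worked example of the ring arithmetic above: tx_prod and tx_cons are
 * free-running indices masked by the power-of-two ring size.  With
 * TG3_TX_RING_SIZE == 512, tx_pending == 511, tx_prod == 5 and
 * tx_cons == 510, the in-flight count is (5 - 510) & 511 == 7, so
 * tg3_tx_avail() returns 511 - 7 == 504.  The unsigned subtraction stays
 * correct even after tx_prod wraps around past tx_cons.
 */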
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;

	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);
			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
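
/*
 * The smp_mb() in tg3_tx() pairs with the barrier tg3_start_xmit() issues
 * after netif_tx_stop_queue(): whichever side runs second is guaranteed to
 * see the other side's update (the new tx_cons or the stopped queue), so
 * either the producer re-checks tg3_tx_avail() and wakes itself, or the
 * completion path above observes the stopped queue and wakes it.
 */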
static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	if (!ri->skb)
		return;

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(ri->skb);
	ri->skb = NULL;
}
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			    u32 opaque_key, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		skb_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		skb_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
	if (skb == NULL)
		return -ENOMEM;

	skb_reserve(skb, tp->rx_offset);

	mapping = pci_map_single(tp->pdev, skb->data, skb_size,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	map->skb = skb;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_skb for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->skb = src_map->skb;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->skb = NULL;
}
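
/*
 * The smp_wmb() above pairs with the second smp_rmb() in
 * tg3_rx_prodring_xfer(): that path first checks the destination skb
 * pointers and then copies descriptor addresses, so the address transfer
 * here must be globally visible before the source skb pointer is cleared,
 * or the xfer path could copy a stale address for a slot it believes is
 * free.
 */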
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
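
/*
 * A short walk-through of the scheme described above: the host posts a
 * buffer at, say, standard-ring index 25, tagging its descriptor with the
 * opaque cookie (RXD_OPAQUE_RING_STD | 25).  When a frame arrives, the
 * chip DMAs the data into that buffer and writes a fresh status-ring
 * entry carrying the same cookie plus the length and error/VLAN flags.
 * tg3_rx() below decodes the cookie to locate rx_std_buffers[25], hands
 * the skb up the stack (or copies it for small frames), and posts a
 * replacement buffer at the current producer index.
 */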
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;

			skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
						    *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			/* Ensure that the update to the skb happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->skb = NULL;

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len +
						    TG3_RAW_IP_ALIGN);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len,
						    PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr,
						       len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		if (tnapi != &tp->napi[1])
			napi_schedule(&tp->napi[1].napi);
	}

	return received;
}
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_rx() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_rx() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
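
/*
 * Example of the copy-count clamping in the loops above: with a 512-entry
 * standard ring (mask 511), a source consumer index of 500 and a source
 * producer index of 10, the consumer is numerically above the producer,
 * so cpycnt is first limited to 512 - 500 == 12 entries (slots 500..511).
 * The consumer index then wraps to 0 and the next pass of the while loop
 * moves the remaining 10 entries, so each memcpy stays contiguous.
 */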
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		for (i = 1; i < tp->irq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	schedule_work(&tp->reset_task);
}
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}
static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}
static void tg3_napi_init(struct tg3 *tp)
{
	int i;

	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
	for (i = 1; i < tp->irq_cnt; i++)
		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
}
static void tg3_napi_fini(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		netif_napi_del(&tp->napi[i].napi);
}
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}
static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
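
/*
 * Typical shutdown usage of the pair above: tg3_full_lock(tp, 1) takes
 * tp->lock with bottom halves disabled, then tg3_irq_quiesce() sets
 * irq_sync so subsequent interrupts bail out early via tg3_irq_sync(),
 * and synchronize_irq() waits for any handler already running on another
 * CPU to finish.  After that, no driver code path can race with the
 * caller until tg3_full_unlock().
 */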
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox.  PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
static int tg3_init_hw(struct tg3 *, int);
static int tg3_halt(struct tg3 *, int, int);

/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	restart_timer = tg3_flag(tp, RESTART_TIMER);
	tg3_flag_clear(tp, RESTART_TIMER);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);
}
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	schedule_work(&tp->reset_task);
}
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return (base > 0xffffdcc0) && (base + len + 8 < base);
}
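
/*
 * Worked example for the test above: with mapping = 0xfffffe00 and
 * len = 0x1000, base + len + 8 computes to 0x100000e08, which truncates
 * to 0xe08 in 32 bits; that is less than base, so the buffer is flagged
 * as crossing a 4GB boundary.  The base > 0xffffdcc0 pre-check cheaply
 * skips buffers that start more than roughly 9KB (0x2340 bytes) below a
 * boundary and so cannot wrap with any supported frame size.
 */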
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
			dma_addr_t mapping, int len, u32 flags,
			u32 mss_and_is_end)
{
	struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
	int is_end = (mss_and_is_end & 0x1);
	u32 mss = (mss_and_is_end >> 1);
	u32 vlan_tag = 0;

	if (is_end)
		flags |= TXD_FLAG_END;
	if (flags & TXD_FLAG_VLAN) {
		vlan_tag = flags >> 16;
		flags &= 0xffff;
	}
	vlan_tag |= (mss << TXD_MSS_SHIFT);

	txd->addr_hi = ((u64) mapping >> 32);
	txd->addr_lo = ((u64) mapping & 0xffffffff);
	txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
	txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
}
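
/*
 * Encoding example for the mss_and_is_end argument above: callers pack
 * the MSS and the end-of-packet marker into one word as
 * (mss << 1) | is_end.  For an MSS of 1460 on the final descriptor that
 * is (1460 << 1) | 1 == 2921, which decodes back to is_end == 1 and
 * mss == 1460.
 */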
static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
				struct sk_buff *skb, int last)
{
	int i;
	u32 entry = tnapi->tx_prod;
	struct ring_info *txb = &tnapi->tx_buffers[entry];

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);
	for (i = 0; i < last; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       frag->size, PCI_DMA_TODEVICE);
	}
}
/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff *skb,
				       u32 base_flags, u32 mss)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb;
	dma_addr_t new_addr = 0;
	u32 entry = tnapi->tx_prod;
	int ret = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			ret = -1;
			dev_kfree_skb(new_skb);

		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		} else if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
			pci_unmap_single(tp->pdev, new_addr, new_skb->len,
					 PCI_DMA_TODEVICE);
			ret = -1;
			dev_kfree_skb(new_skb);
		} else {
			tnapi->tx_buffers[entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry],
					   mapping, new_addr);

			tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
		}
	}

	dev_kfree_skb(skb);

	return ret;
}
static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);

/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
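
/*
 * The frag_cnt_est heuristic above assumes each segment that
 * skb_gso_segment() emits will consume at most three tx descriptors
 * (roughly, one for the linear header area plus a couple of page
 * fragments), so gso_segs * 3 acts as a conservative upper bound on the
 * ring space the transmit loop may need before the queue is stopped.
 */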
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		if (skb_is_gso_v6(skb)) {
			hdr_len = skb_headlen(skb) - ETH_HLEN;
		} else {
			u32 ip_tcp_len;

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
			hdr_len = ip_tcp_len + tcp_opt_len;

			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		dev_kfree_skb(skb);
		goto out_unlock;
	}

	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		would_hit_hwbug = 1;

	if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	if (tg3_40bit_overflow_test(tp, mapping, len))
		would_hit_hwbug = 1;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	tg3_set_txd(tnapi, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (pci_dma_mapping_error(tp->pdev, mapping))
				goto dma_error;

			if (tg3_flag(tp, SHORT_DMA_BUG) &&
			    len <= 8)
				would_hit_hwbug = 1;

			if (tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			if (tg3_flag(tp, HW_TSO_1) ||
			    tg3_flag(tp, HW_TSO_2) ||
			    tg3_flag(tp, HW_TSO_3))
				tg3_set_txd(tnapi, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tnapi, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		tg3_skb_error_unmap(tnapi, skb, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags, mss))
			goto out_unlock;

		entry = NEXT_TX(tnapi->tx_prod);
	}

	skb_tx_timestamp(skb);

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

out_unlock:
	mmiowb();

	return NETDEV_TX_OK;

dma_error:
	tg3_skb_error_unmap(tnapi, skb, i);
	dev_kfree_skb(skb);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
	return NETDEV_TX_OK;
}
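
/*
 * Flow-control summary for the stop/wake protocol above: the queue is
 * stopped whenever fewer than MAX_SKB_FRAGS + 1 descriptors remain, so a
 * maximally fragmented skb can always be queued without failing
 * mid-packet.  tg3_tx() re-wakes the queue once tg3_tx_avail() climbs
 * back above TG3_TX_WAKEUP_THRESH(tnapi), with the paired smp_mb() calls
 * closing the race between stopping the queue here and the completion
 * path updating tx_cons.
 */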
static void tg3_set_loopback(struct net_device *dev, u32 features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		/*
		 * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
		 * loopback mode if Half-Duplex mode was negotiated earlier.
		 */
		tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;

		/* Enable internal MAC loopback mode */
		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
		spin_lock_bh(&tp->lock);
		tw32(MAC_MODE, tp->mac_mode);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		/* Disable internal MAC loopback mode */
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
		spin_lock_bh(&tp->lock);
		tw32(MAC_MODE, tp->mac_mode);
		/* Force link status check */
		tg3_setup_phy(tp, 1);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}
static u32 tg3_fix_features(struct net_device *dev, u32 features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}

static int tg3_set_features(struct net_device *dev, u32 features)
{
	u32 changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
					tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
						TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
				tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
					TG3_RX_JMB_MAP_SZ);
	}
}
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}
static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; ) {
			struct ring_info *txp;
			struct sk_buff *skb;
			unsigned int k;

			txp = &tnapi->tx_buffers[i];
			skb = txp->skb;

			if (skb == NULL) {
				i++;
				continue;
			}

			pci_unmap_single(tp->pdev,
					 dma_unmap_addr(txp, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);
			txp->skb = NULL;

			i++;

			for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
				txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
				pci_unmap_page(tp->pdev,
					       dma_unmap_addr(txp, mapping),
					       skb_shinfo(skb)->frags[k].size,
					       PCI_DMA_TODEVICE);
				i++;
			}

			dev_kfree_skb_any(skb);
		}
	}
}
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
					  tnapi->tx_ring, tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;

		if (tnapi->rx_rcb) {
			dma_free_coherent(&tp->pdev->dev,
					  TG3_RX_RCB_RING_BYTES(tp),
					  tnapi->rx_rcb,
					  tnapi->rx_rcb_mapping);
			tnapi->rx_rcb = NULL;
		}

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping,
					  GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
		sblk = tnapi->hw_status;

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector TSS is enabled, vector 0 does not handle
		 * tx interrupts.  Don't allocate any resources for it.
		 */
		if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
		    (i && tg3_flag(tp, ENABLE_TSS))) {
			tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
						    TG3_TX_RING_SIZE,
						    GFP_KERNEL);
			if (!tnapi->tx_buffers)
				goto err_out;

			tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
							    TG3_TX_RING_BYTES,
							    &tnapi->tx_desc_mapping,
							    GFP_KERNEL);
			if (!tnapi->tx_ring)
				goto err_out;
		}

		/*
		 * When RSS is enabled, the status block format changes
		 * slightly.  The "rx_jumbo_consumer", "reserved",
		 * and "rx_mini_consumer" members get mapped to the
		 * other three rx return ring producer indexes.
		 */
		switch (i) {
		default:
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
			break;
		case 2:
			tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
			break;
		case 3:
			tnapi->rx_rcb_prod_idx = &sblk->reserved;
			break;
		case 4:
			tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
			break;
		}

		/*
		 * If multivector RSS is enabled, vector 0 does not handle
		 * rx or tx interrupts.  Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;

		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	}

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* NCSI does not support APE events */
	if (tg3_flag(tp, APE_HAS_NCSI))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
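
/* Note the handshake in tg3_ape_send_event() above: the event word is
 * posted with APE_EVENT_STATUS_EVENT_PENDING set while holding
 * TG3_APE_LOCK_MEM, and the APE_EVENT_1 doorbell is only rung once the
 * pending bit from the previous event has been observed clear.
 */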
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}
/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}
/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
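
/* The three signature writers above differ only in when they run and in
 * the DRV_STATE_* value they post: tg3_write_sig_pre_reset() runs before
 * a chip reset, tg3_write_sig_post_reset() reports the *_DONE states
 * afterwards, and tg3_write_sig_legacy() serves older ASF firmware that
 * does not use the new handshake.  All of them target the same
 * NIC_SRAM_FW_DRV_STATE_MBOX mailbox.
 */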
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
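
/* The bootcode acknowledges the driver by writing back the one's
 * complement of NIC_SRAM_FIRMWARE_MBOX_MAGIC1, which is why the loop in
 * tg3_poll_fw() compares the mailbox against ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1
 * rather than the magic value itself.
 */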
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
		if (tg3_flag(tp, PCI_EXPRESS))
			pcie_set_readrq(tp->pdev, tp->pcie_readrq);
		else {
			pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
					      tp->pci_cacheline_sz);
			pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
					      tp->pci_lat_timer);
		}
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
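
/* tg3_save_pci_state()/tg3_restore_pci_state() bracket tg3_chip_reset()
 * below: the GRC_MISC_CFG core clock reset can clear the memory enable
 * bit in PCI_COMMAND and the MSI enable bit, so PCI_COMMAND is saved
 * beforehand and PCI_COMMAND, TG3PCI_MISC_HOST_CTRL, TG3PCI_PCISTATE and
 * the PCI-X/MSI settings are reprogrammed afterwards.
 */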
static void tg3_stop_fw(struct tg3 *);

/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
		u16 val16;

		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
				     &val16);
		val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
			   PCI_EXP_DEVCTL_NOSNOOP_EN);
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
				      val16);

		pcie_set_readrq(tp->pdev, tp->pcie_readrq);

		/* Clear error status */
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}
	}

	return 0;
}
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, 0);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (err)
		return err;

	return 0;
}
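
/* The halt sequence above is strictly ordered: firmware is paused first
 * (tg3_stop_fw), the pre-reset signature is posted, the DMA engines are
 * quiesced (tg3_abort_hw), and only then is the core reset.  The MAC
 * address is rewritten afterwards because the reset does not preserve
 * the MAC address registers.
 */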
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000

/* tp->lock is held. */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
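
/* Per the defines above, the RX and TX MIPS cores each get a 16 kB
 * (0x4000) scratchpad window, at 0x30000 and 0x34000 respectively.
 * tg3_load_firmware_cpu() below zeroes the relevant window before
 * copying a firmware image into it.
 */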
struct fw_info {
	unsigned int fw_base;
	unsigned int fw_len;
	const __be32 *fw_data;
};

/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	if (tg3_flag(tp, 5705_PLUS))
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->fw_base & 0xffff) +
			      (i * sizeof(u32))),
			 be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}
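
/* Layout of the firmware blobs consumed by the loaders below, in
 * big-endian 32-bit words (see the "Firmware blob starts with..."
 * comments in each loader):
 *
 *	fw_data[0]	version
 *	fw_data[1]	start address (becomes info.fw_base)
 *	fw_data[2]	stated length
 *	fw_data[3..]	image, copied contiguously to fw_base
 *
 * which is why both loaders compute info.fw_len as tp->fw->size - 12
 * and point info.fw_data at &fw_data[3].
 */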
/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	int err, i;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);

	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}
/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		return 0;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	cpu_scratch_size = tp->fw_len;
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.fw_base);

	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0, skip_mac_1 = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = 1;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	spin_unlock_bh(&tp->lock);

	return err;
}
/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}

static void __tg3_set_rx_mode(struct net_device *);
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
	}

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		if (!netif_carrier_ok(tp->dev))
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}

	for (i = 0; i < tp->irq_cnt - 1; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);

		if (tg3_flag(tp, ENABLE_TSS)) {
			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);

		if (tg3_flag(tp, ENABLE_TSS)) {
			tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
			tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
			tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
		}
	}
}
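
/* The per-vector host coalescing registers programmed above are laid
 * out as fixed-size banks; the "+ i * 0x18" stride means each extra
 * MSI-X vector's RXCOL_TICKS/RXMAX_FRAMES/RXCOAL_MAXF_INT registers
 * (and the TX equivalents under TSS) sit 0x18 bytes past the previous
 * vector's bank, starting at the *_VEC1 offsets.
 */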
/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[0].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		if (tnapi->rx_rcb) {
			tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
				       ((tp->rx_ret_ring_mask + 1) <<
					BDINFO_FLAGS_MAXLEN_SHIFT), 0);
			rxrcb += TG3_BDINFO_SIZE;
		}

		stblk += 8;
	}
}
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
	else
		bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
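
/* The replenish thresholds programmed above are the smaller of what the
 * on-chip BD cache can absorb (bdcache_maxcnt / 2, further capped by
 * rx_std_max_post for the standard ring) and a host-side watermark of
 * one eighth of the configured ring depth, never less than one buffer.
 */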
/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
{
	u32 val, rdmac_mode;
	int i, err, limit;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tg3_disable_ints(tp);

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, 1);

	/* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
		tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
		       TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
		       TG3_CPMU_EEE_LNKIDL_UART_IDL);

		tw32_f(TG3_CPMU_EEE_CTRL,
		       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

		val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
		      TG3_CPMU_EEEMD_LPI_IN_TX |
		      TG3_CPMU_EEEMD_LPI_IN_RX |
		      TG3_CPMU_EEEMD_EEE_ENABLE;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
			val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

		if (tg3_flag(tp, ENABLE_APE))
			val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

		tw32_f(TG3_CPMU_EEE_MODE, val);

		tw32_f(TG3_CPMU_EEE_DBTMR1,
		       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
		       TG3_CPMU_DBTMR1_LNKIDLE_2047US);

		tw32_f(TG3_CPMU_EEE_DBTMR2,
		       TG3_CPMU_DBTMR2_APE_TX_2047US |
		       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
	}

	if (reset_phy)
		tg3_phy_reset(tp);
	err = tg3_chip_reset(tp);
	if (err)
		return err;

	tg3_write_sig_legacy(tp, RESET_KIND_INIT);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}
	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
		/* Enable some hw fixes.  */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processers, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	tw32(GRC_MODE,
	     tp->grc_mode |
	     (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));

	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);
	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing.  */
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}
	if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

	tg3_setup_rxbd_thresholds(tp);

	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *				ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tpr->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
	if (!tg3_flag(tp, 5717_PLUS))
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);

	/* Disable the mini ring */
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

	/* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {

		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
			     ((u64) tpr->rx_jmb_mapping >> 32));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
			val = TG3_RX_JMB_RING_SIZE(tp) <<
			      BDINFO_FLAGS_MAXLEN_SHIFT;
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     val | BDINFO_FLAGS_USE_EXT_RECV);
			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
				val = TG3_RX_STD_MAX_SIZE_5700;
			else
				val = TG3_RX_STD_MAX_SIZE_5717;
			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
			val |= (TG3_RX_STD_DMA_SZ << 2);
		} else
			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
	} else
		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);

	tpr->rx_std_prod_idx = tp->rx_pending;
	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);

	tpr->rx_jmb_prod_idx =
		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);

	tg3_rings_reset(tp);
	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp, 0);

	/* MTU + ethernet header + FCS + optional VLAN tag */
	tw32(MAC_RX_MTU_SIZE,
	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT) |
	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	tw32(MAC_TX_LENGTHS, val);

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);

	/* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tg3_flag(tp, PCI_EXPRESS))
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;

	if (tg3_flag(tp, 57765_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3_RDMA_RSRVCTRL_REG);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
		}
		tw32(TG3_RDMA_RSRVCTRL_REG,
		     val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
	}
	/* Receive/send statistics. */
	if (tg3_flag(tp, 5750_PLUS)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_DACK_FIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   tg3_flag(tp, TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));

	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Status/statistics block address.  See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);

		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32)) {
			tg3_write_mem(tp, i, 0);
			udelay(40);
		}
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);

	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}

	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
			MAC_MODE_FHDE_ENABLE;
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	if (!tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);

	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!tg3_flag(tp, IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

		tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

		/* GPIO1 must be driven high for eeprom write protect */
		if (tg3_flag(tp, EEPROM_WRITE_PROT))
			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
					       GRC_LCLCTRL_GPIO_OUTPUT1);
	}
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);
	if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
		val = tr32(MSGINT_MODE);
		val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
		tw32(MSGINT_MODE, val);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}

	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
	       WDMAC_MODE_LNGREAD_ENAB);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
			/* nothing */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}

	/* Enable host coalescing bug fix */
	if (tg3_flag(tp, 5755_PLUS))
		val |= WDMAC_MODE_STATUS_TAG_FIX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		val |= WDMAC_MODE_BURST_ALL_DATA;

	tw32_f(WDMAC_MODE, val);
	udelay(40);

	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
			pcix_cmd |= PCI_X_CMD_READ_2K;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
			pcix_cmd |= PCI_X_CMD_READ_2K;
		}
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);

	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		tw32(SNDDATAC_MODE,
		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
	else
		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);

	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		val |= RCVDBDI_MODE_LRG_RING_SZ;
	tw32(RCVDBDI_MODE, val);
	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
	if (tg3_flag(tp, ENABLE_TSS))
		val |= SNDBDI_MODE_MULTI_TXQ_EN;
	tw32(SNDBDI_MODE, val);
	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}

	if (tg3_flag(tp, TSO_CAPABLE)) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}

	tp->tx_mode = TX_MODE_ENABLE;

	if (tg3_flag(tp, 5755_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
		tp->tx_mode &= ~val;
		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
	}

	tw32_f(MAC_TX_MODE, tp->tx_mode);
	udelay(100);

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i = 0;
		u32 reg = MAC_RSS_INDIR_TBL_0;

		if (tp->irq_cnt == 2) {
			for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
				tw32(reg, 0x0);
				reg += 4;
			}
		} else {
			while (i < TG3_RSS_INDIR_TBL_SIZE) {
				val = i % (tp->irq_cnt - 1);
				i++;
				for (; i % 8; i++) {
					val <<= 4;
					val |= (i % (tp->irq_cnt - 1));
				}
				tw32(reg, val);
				reg += 4;
			}
		}

		/* Setup the "secret" hash key. */
		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
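
		/* The indirection table fill above packs eight 4-bit ring
		 * indexes into each 32-bit MAC_RSS_INDIR_TBL register (the
		 * "i % 8" nibble loop), spreading the TG3_RSS_INDIR_TBL_SIZE
		 * entries round-robin over the tp->irq_cnt - 1 rx vectors.
		 * Vector 0 is excluded because it does not handle rx when
		 * RSS is enabled.
		 */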
	tp->rx_mode = RX_MODE_ENABLE;
	if (tg3_flag(tp, 5755_PLUS))
		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;

	if (tg3_flag(tp, ENABLE_RSS))
		tp->rx_mode |= RX_MODE_RSS_ENABLE |
			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
			       RX_MODE_RSS_IPV6_HASH_EN |
			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
			       RX_MODE_RSS_IPV4_HASH_EN |
			       RX_MODE_RSS_TCP_IPV4_HASH_EN;

	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	tw32(MAC_LED_CTRL, tp->led_ctrl);

	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set drive transmission level to 1.2V */
			/* only if the signal pre-emphasis bit is not set */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
		if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
			tw32(MAC_SERDES_CFG, 0x616000);
	}

	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		/* Use hardware link auto-negotiation */
		tg3_flag_set(tp, HW_AUTONEG);
	}

	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	}

	if (!tg3_flag(tp, USE_PHYLIB)) {
		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
			tp->link_config.speed = tp->link_config.orig_speed;
			tp->link_config.duplex = tp->link_config.orig_duplex;
			tp->link_config.autoneg = tp->link_config.orig_autoneg;
		}

		err = tg3_setup_phy(tp, 0);
		if (err)
			return err;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			u32 tmp;

			/* Clear CRC stats. */
			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
				tg3_writephy(tp, MII_TG3_TEST1,
					     tmp | MII_TG3_TEST1_CRC_EN);
				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
			}
		}
	}

	__tg3_set_rx_mode(tp->dev);

	/* Initialize receive rules. */
	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tg3_flag(tp, ENABLE_ASF))
		limit -= 4;
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
	case 15:
		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
	case 14:
		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
	case 13:
		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
	case 12:
		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
	case 11:
		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
	case 10:
		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
	case 9:
		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
	case 8:
		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
	case 7:
		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
	case 6:
		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
	case 5:
		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
	case 4:
		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
	case 2:
	case 1:

	default:
		break;
	}

	if (tg3_flag(tp, ENABLE_APE))
		/* Write our heartbeat update interval to APE. */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
				APE_HOST_HEARTBEAT_INT_DISABLE);

	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
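/* TG3_STAT_ADD32() folds a 32-bit hardware counter read into a 64-bit
 * (high/low) software counter.  The carry check relies on unsigned
 * wraparound: e.g. if (PSTAT)->low was 0xfffffff0 and __val is 0x20,
 * low becomes 0x10, which is less than __val, so high gains one.
 */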
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					return;
				}
				tw32_mailbox(tnapi->int_mbox,
					     tnapi->last_tag << 24);
			}
		}
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
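/* A lost MSI leaves work pending with no further interrupt coming.
 * tg3_chk_missed_msi() gives each vector one timer tick of grace
 * (chk_msi_cnt) with unchanged rx/tx consumer indices before rewriting
 * the interrupt mailbox, which re-arms the vector and lets NAPI pick
 * the stalled work back up.
 */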
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		tg3_chk_missed_msi(tp);

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tg3_flag_set(tp, RESTART_TIMER);
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * the same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
static int tg3_request_firmware(struct tg3 *tp)
{
	const __be32 *fw_data;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course
	 */

	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - 12)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
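/* Layout implied by the checks above (illustrative): fw_data[0] holds
 * the version, fw_data[1] the start address, and fw_data[2] the full
 * image length including BSS, so the payload following the 12-byte
 * header may legitimately be shorter than fw_len.
 */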
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc, cpus = num_online_cpus();
	struct msix_entry msix_ent[tp->irq_max];

	if (cpus == 1)
		/* Just fallback to the simpler MSI mode. */
		return false;

	/*
	 * We want as many rx rings enabled as there are cpus.
	 * The first MSIX vector only deals with link interrupts, etc,
	 * so we add one to the number of vectors we are requesting.
	 */
	tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	netif_set_real_num_tx_queues(tp->dev, 1);
	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt > 1) {
		tg3_flag_set(tp, ENABLE_RSS);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
			tg3_flag_set(tp, ENABLE_TSS);
			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
		}
	}

	return true;
}
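/* Example: on a 4-CPU system with irq_max >= 5, five MSI-X vectors are
 * requested: vector 0 for link and other events plus four rx-ring
 * vectors, leaving irq_cnt == 5 and four real rx queues.
 */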
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}

static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	netif_carrier_off(tp->dev);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--)
				free_irq(tnapi->irq_vec, tnapi);
			goto err_out2;
		}
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		if (tg3_flag(tp, TAGGED_STATUS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out3;
		}
	}

	if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
		u32 val = tr32(PCIE_TRANSACTION_CFG);

		tw32(PCIE_TRANSACTION_CFG,
		     val | PCIE_TRANS_CFG_1SHOT_MSI);
	}

	tg3_phy_start(tp);

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);
	tg3_frob_aux_power(tp, false);
	pci_set_power_state(tp->pdev, PCI_D3hot);
	return err;
}
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
						 struct rtnl_link_stats64 *);
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);

static int tg3_close(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	tg3_napi_disable(tp);
	cancel_work_sync(&tp->reset_task);

	netif_tx_stop_all_queues(dev);

	del_timer_sync(&tp->timer);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_get_stats64(tp->dev, &tp->net_stats_prev);

	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);

	tg3_power_down(tp);

	netif_carrier_off(tp->dev);

	return 0;
}
static inline u64 get_stat64(tg3_stat64_t *val)
{
       return ((u64)val->high << 32) | ((u64)val->low);
}
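/* Example: high = 0x1, low = 0x2 reassembles to 0x100000002, the
 * 64-bit value maintained by TG3_STAT_ADD32() above.
 */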
static u64 calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
			 get_stat64(&hw_stats->member)
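/* Expanded, ESTAT_ADD(rx_octets) reads:
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_stat64(&hw_stats->rx_octets);
 * i.e. each ethtool counter is the snapshot taken at last close plus
 * the live hardware statistics block value.
 */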
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);

	return estats;
}
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	stats->rx_dropped = tp->rx_dropped;

	return stats;
}
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
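/* Hash example: bit = ~crc & 0x7f selects one of 128 filter bits;
 * bits 6:5 of that value pick one of the four MAC_HASH_REG_n words
 * (regidx) and bits 4:0 the bit within it, so bit == 0x43 sets bit 3
 * of MAC_HASH_REG_2.
 */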
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}

static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}

static void tg3_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}

static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;
	u8 *pd;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
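/* Worked example from the comment above: offset=1 len=2 gives
 * b_offset=1 and b_count clamped to 2, so one aligned word is read at
 * offset 0 and bytes 1..2 of it are copied out; the middle and tail
 * passes then have nothing left to do.
 */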
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);

static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev)) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_INVALID);
		cmd->duplex = DUPLEX_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (cmd->advertising & ~mask)
			return -EINVAL;

		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	tp->link_config.orig_speed = tp->link_config.speed;
	tp->link_config.orig_duplex = tp->link_config.duplex;
	tp->link_config.orig_autoneg = tp->link_config.autoneg;

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->fw_version, tp->fw_ver);
	strcpy(info->bus_info, pci_name(tp->pdev));
}

static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;
	wol->wolopts = 0;
	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
		wol->wolopts = WAKE_MAGIC;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	spin_lock_bh(&tp->lock);
	if (device_may_wakeup(dp))
		tg3_flag_set(tp, WOL_ENABLE);
	else
		tg3_flag_clear(tp, WOL_ENABLE);
	spin_unlock_bh(&tp->lock);

	return 0;
}
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}

static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}

static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);

	ering->rx_max_pending = tp->rx_std_ring_mask;
	ering->rx_mini_max_pending = 0;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
	else
		ering->rx_jumbo_max_pending = 0;

	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;

	ering->rx_pending = tp->rx_pending;
	ering->rx_mini_pending = 0;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
	else
		ering->rx_jumbo_pending = 0;

	ering->tx_pending = tp->napi[0].tx_pending;
}
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);

	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);

	if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
		epause->rx_pause = 1;
	else
		epause->rx_pause = 0;

	if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
		epause->tx_pause = 1;
	else
		epause->tx_pause = 0;
}
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			tp->link_config.orig_advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.orig_advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
static int tg3_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return TG3_NUM_TEST;
	case ETH_SS_STATS:
		return TG3_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}

static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}

static int tg3_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
static void tg3_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes. */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
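/* In the hardware-format selfboot check above, each stored parity bit
 * must make the combined data byte plus parity bit come out odd: a
 * data byte with odd hweight8() must have its parity bit clear, and
 * one with even weight must have it set, or the test fails.
 */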
#define TG3_SERDES_TIMEOUT_SEC	2
#define TG3_COPPER_TIMEOUT_SEC	6

static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;

	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	for (i = 0; i < max; i++) {
		if (netif_carrier_ok(tp->dev))
			return 0;

		if (msleep_interruptible(1000))
			break;
	}

	return -EIO;
}
/* Only test the commonly used registers */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
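/* Mask semantics, worked for the first table entry: MAC_MODE on
 * non-5705 parts has read_mask 0x00000000 (no read-only bits) and
 * write_mask 0x00ef6f8c, so the loop expects zero back after writing
 * zero, and 0x00ef6f8c back after writing 0x00ef6f8c.
 */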
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	int i;
	u32 j;

	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
		for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}

	return 0;
}
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
#define TG3_MAC_LOOPBACK	0
#define TG3_PHY_LOOPBACK	1
#define TG3_TSO_LOOPBACK	2

#define TG3_TSO_MSS		500

#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
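/* The template above supplies everything after the two MAC addresses of
 * the TSO loopback test frame: the IP ethertype, a TG3_TSO_IP_HDR_LEN
 * byte IPv4 header (src 10.0.0.1, dst 10.0.0.2, protocol TCP), and a TCP
 * header carrying TG3_TSO_TCP_OPT_LEN bytes of timestamp options.
 */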
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.  Also, the MAC loopback test is deprecated for
		 * all newer ASIC revisions.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
		    tg3_flag(tp, CPMU_PRESENT))
			return 0;

		mac_mode = tp->mac_mode &
			   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
		mac_mode |= MAC_MODE_PORT_INT_LPBACK;
		if (!tg3_flag(tp, 5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else {
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			tg3_phy_fet_toggle_apd(tp, false);
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode &
			   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			tg3_writephy(tp, MII_TG3_FET_PTEST,
				     MII_TG3_FET_PTEST_FRC_TX_LINK |
				     MII_TG3_FET_PTEST_FRC_TX_LOCK);
			/* The write needs to be flushed for the AC131 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
				tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
			if (masked_phy_id == TG3_PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if (masked_phy_id == TG3_PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}
	}

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (loopback_mode == TG3_TSO_LOOPBACK) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;
	}

	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
		    base_flags, (mss << 1) | 1);

	tnapi->tx_prod++;

	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (loopback_mode != TG3_TSO_LOOPBACK) {
			if (rx_len != tx_len)
				goto out;

			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_skb = tpr->rx_std_buffers[desc_idx].skb;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_skb->data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
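/* Loopback results are reported as a bitmap: the STD/JMB/TSO failure
 * bits defined below are shifted by TG3_MAC_LOOPBACK_SHIFT or
 * TG3_PHY_LOOPBACK_SHIFT depending on which loopback path failed, so a
 * single return value can describe both the MAC and PHY runs.
 */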
#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4

#define TG3_MAC_LOOPBACK_SHIFT		0
#define TG3_PHY_LOOPBACK_SHIFT		4
#define TG3_LOOPBACK_FAILED		0x00000077

static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;
	u32 eee_cap, cpmuctrl = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	err = tg3_reset_hw(tp, 1);
	if (err) {
		err = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* Turn off gphy autopowerdown. */
	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, false);

	if (tg3_flag(tp, CPMU_PRESENT)) {
		int i;
		u32 status;

		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

		/* Wait for up to 40 microseconds to acquire lock. */
		for (i = 0; i < 4; i++) {
			status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
				break;
			udelay(10);
		}

		if (status != CPMU_MUTEX_GNT_DRIVER) {
			err = TG3_LOOPBACK_FAILED;
			goto done;
		}

		/* Turn off link-based power management. */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		tw32(TG3_CPMU_CTRL,
		     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
				  CPMU_CTRL_LINK_AWARE_MODE));
	}

	if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
		err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;

	if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
	    tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
		err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;

	if (tg3_flag(tp, CPMU_PRESENT)) {
		tw32(TG3_CPMU_CTRL, cpmuctrl);

		/* Release the mutex */
		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
	}

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
			err |= TG3_STD_LOOPBACK_FAILED <<
			       TG3_PHY_LOOPBACK_SHIFT;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
			err |= TG3_TSO_LOOPBACK_FAILED <<
			       TG3_PHY_LOOPBACK_SHIFT;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
			err |= TG3_JMB_LOOPBACK_FAILED <<
			       TG3_PHY_LOOPBACK_SHIFT;
	}

	/* Re-enable gphy autopowerdown. */
	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);

done:
	tp->phy_flags |= eee_cap;

	return err;
}
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);
}
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!tg3_flag(tp, 5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing.  We want to operate on the
			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses.  This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU.  The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
			tg3_get_57780_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
					      u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
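/* Note on the swab32(be32_to_cpu()) pairing above: it applies, in
 * reverse order, exactly the swaps tg3_nvram_read_be32() performs on
 * the read side, so a buffer written through this path reads back
 * byte-for-byte identical regardless of host endianness.
 */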
/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		buf += size;

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
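/* Unbuffered flash cannot be written in place, so the loop above does a
 * full read-modify-write per page: read the page into tmp, merge the
 * caller's bytes at page_off, issue a write-enable, erase the page, then
 * stream the merged page back with FIRST/LAST framing on the commands.
 */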
/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
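/* Dispatch summary for the writer above: legacy SEEPROM parts go through
 * the GRC_EEPROM_* registers; NVRAM-capable chips use the buffered
 * command path when the part is buffered (or is not flash at all), and
 * the page-erase unbuffered path otherwise, with writes gated by
 * GRC_MODE_NVRAM_WR_ENABLE for the duration.
 */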
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};

static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;
		}

		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tg3_flag_set(tp, ASPM_WORKAROUND);
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
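/* Layout note for the merge above: the upper half of the 32-bit gphy
 * config sits in the low 16 bits of the word at OTP_ADDRESS_MAGIC1 and
 * the lower half in the high 16 bits of the word at OTP_ADDRESS_MAGIC2;
 * the shift-and-or stitches the two halves back together.
 */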
static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg |
		  ADVERTISED_Pause;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		adv |= ADVERTISED_1000baseT_Half |
		       ADVERTISED_1000baseT_Full;

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_INVALID;
	tp->link_config.duplex = DUPLEX_INVALID;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_INVALID;
	tp->link_config.active_duplex = DUPLEX_INVALID;
	tp->link_config.orig_speed = SPEED_INVALID;
	tp->link_config.orig_duplex = DUPLEX_INVALID;
	tp->link_config.orig_autoneg = AUTONEG_INVALID;
}
13095 static int __devinit
tg3_phy_probe(struct tg3
*tp
)
13097 u32 hw_phy_id_1
, hw_phy_id_2
;
13098 u32 hw_phy_id
, hw_phy_id_masked
;
13101 /* flow control autonegotiation is default behavior */
13102 tg3_flag_set(tp
, PAUSE_AUTONEG
);
13103 tp
->link_config
.flowctrl
= FLOW_CTRL_TX
| FLOW_CTRL_RX
;
13105 if (tg3_flag(tp
, USE_PHYLIB
))
13106 return tg3_phy_init(tp
);
13108 /* Reading the PHY ID register can conflict with ASF
13109 * firmware access to the PHY hardware.
13112 if (tg3_flag(tp
, ENABLE_ASF
) || tg3_flag(tp
, ENABLE_APE
)) {
13113 hw_phy_id
= hw_phy_id_masked
= TG3_PHY_ID_INVALID
;
13115 /* Now read the physical PHY_ID from the chip and verify
13116 * that it is sane. If it doesn't look good, we fall back
13117 * to either the hard-coded table based PHY_ID and failing
13118 * that the value found in the eeprom area.
13120 err
|= tg3_readphy(tp
, MII_PHYSID1
, &hw_phy_id_1
);
13121 err
|= tg3_readphy(tp
, MII_PHYSID2
, &hw_phy_id_2
);
13123 hw_phy_id
= (hw_phy_id_1
& 0xffff) << 10;
13124 hw_phy_id
|= (hw_phy_id_2
& 0xfc00) << 16;
13125 hw_phy_id
|= (hw_phy_id_2
& 0x03ff) << 0;
13127 hw_phy_id_masked
= hw_phy_id
& TG3_PHY_ID_MASK
;
	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, mask;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
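/* Note on the double MII_BMSR read in tg3_phy_probe() above: the MII
 * link-status bit is latched-low per the 802.3 spec, so the first read
 * clears any stale link-down indication and the second read reports the
 * current link state.
 */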
static void __devinit tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		memcpy(tp->fw_ver, &vpd_data[j], len);
		strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    val != 0)
		return 0;

	return 1;
}
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;

		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
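/* Bootcode version sketch: newer images store a 16-byte version string
 * whose NVRAM location is derived from a pointer 8 bytes into the image
 * header, while older images keep packed major/minor fields at
 * TG3_NVM_PTREV_BCVER; the "newver" flag above selects between the two
 * layouts.
 */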
static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}
static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
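/* Worked example (hypothetical field values): major = 1, minor = 2,
 * build = 3 yields " v1.02" followed by the build letter 'a' + 3 - 1,
 * i.e. the string "sb v1.02c".  Builds are capped at 26 above so the
 * suffix always stays within 'a'..'z'.
 */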
static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
static void __devinit tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
		tg3_flag_set(tp, APE_HAS_NCSI);
		fwtype = "NCSI";
	} else {
		fwtype = "DASH";
	}

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);
	else
		return;

	if (vpd_vers)
		goto done;

	if (tg3_flag(tp, ENABLE_APE)) {
		if (tg3_flag(tp, ENABLE_ASF))
			tg3_read_dash_ver(tp);
	} else if (tg3_flag(tp, ENABLE_ASF)) {
		tg3_read_mgmtfw_ver(tp);
	}

done:
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
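/* Firmware-version dispatch sketch: the first NVRAM word acts as a magic
 * number selecting the image format (full bootcode for TG3_EEPROM_MAGIC,
 * a "selfboot" EEPROM image for TG3_EEPROM_MAGIC_FW, or a hardware
 * selfboot image for TG3_EEPROM_MAGIC_HW), and the matching
 * tg3_read_*_ver() helper fills in tp->fw_ver.
 */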
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
static int __devinit tg3_get_invariants(struct tg3 *tp)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver has this
	 * workaround but turns MWI off all the time so never uses
	 * it.  This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* Important! -- Make sure register accesses are byteswapped
	 * correctly.  Also, for those chips that require it, make
	 * sure that indirect register accesses are enabled before
	 * the first operation.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	tp->pci_chip_rev_id = (misc_ctrl_reg >>
			       MISC_HOST_CTRL_CHIPREV_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
		u32 prod_id_asic_rev;

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
			pci_read_config_dword(tp->pdev,
					      TG3PCI_GEN2_PRODID_ASICREV,
					      &prod_id_asic_rev);
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			pci_read_config_dword(tp->pdev,
					      TG3PCI_GEN15_PRODID_ASICREV,
					      &prod_id_asic_rev);
		else
			pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
					      &prod_id_asic_rev);

		tp->pci_chip_rev_id = prod_id_asic_rev;
	}
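	/* Note: tp->pci_chip_rev_id packs the ASIC revision and stepping in
	 * its upper bits; the GET_ASIC_REV()/GET_CHIP_REV() helpers used
	 * throughout this function are simple shift/mask macros from tg3.h
	 * that extract those fields for comparison against the ASIC_REV_*
	 * and CHIPREV_* constants.
	 */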
	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers. The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space. Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range. This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles. However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
	    (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
			u32	rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, ICH_WORKAROUND);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->subordinate >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 5701_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		}
	}
	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		tg3_flag_set(tp, 5780_CLASS);
		tg3_flag_set(tp, 40BIT_DMA_BUG);
		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
	} else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->subordinate >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 40BIT_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
		tp->pdev_peer = tg3_find_peer(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
	    tg3_flag(tp, 5717_PLUS))
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
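	/* The *_PLUS flags form an inclusive hierarchy: 5717_PLUS implies
	 * 57765_PLUS, which implies 5755_PLUS, which implies 5750_PLUS,
	 * which implies 5705_PLUS.  Later feature tests only need to check
	 * the weakest flag that covers the hardware they care about.
	 */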
	/* Determine TSO capabilities */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		; /* Do nothing. HW bug. */
	else if (tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, HW_TSO_3);
	else if (tg3_flag(tp, 5755_PLUS) ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tg3_flag_set(tp, HW_TSO_2);
	else if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, HW_TSO_1);
		tg3_flag_set(tp, TSO_BUG);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
		    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
			tg3_flag_clear(tp, TSO_BUG);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		   tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		tg3_flag_set(tp, TSO_BUG);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	/* Selectively allow TSO based on operating conditions */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3) ||
	    (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
		tg3_flag_set(tp, TSO_CAPABLE);
	else {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;
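	/* TSO selection sketch: chips with HW_TSO_* do segmentation in
	 * hardware, while older 5700-class parts fall back to a firmware
	 * TSO engine (FIRMWARE_TG3TSO/FIRMWARE_TG3TSO5); the
	 * !tg3_flag(tp, ENABLE_ASF) condition above reflects that the
	 * firmware TSO path is not used alongside ASF management firmware.
	 */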
	if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, SUPPORT_MSI);
		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
		    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
		     tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tg3_flag_clear(tp, SUPPORT_MSI);

		if (tg3_flag(tp, 5755_PLUS) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_flag_set(tp, 1SHOT_MSI);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			tg3_flag_set(tp, SUPPORT_MSIX);
			tp->irq_max = TG3_IRQ_MAX_VECS;
		}
	}

	if (tg3_flag(tp, 5755_PLUS))
		tg3_flag_set(tp, SHORT_DMA_BUG);

	if (tg3_flag(tp, 5717_PLUS))
		tg3_flag_set(tp, LRG_PROD_RING_CAP);

	if (tg3_flag(tp, 57765_PLUS) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
		tg3_flag_set(tp, USE_JUMBO_BDFLAG);

	if (!tg3_flag(tp, 5705_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_flag(tp, USE_JUMBO_BDFLAG))
		tg3_flag_set(tp, JUMBO_CAPABLE);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if (pci_is_pcie(tp->pdev)) {
		u16 lnkctl;

		tg3_flag_set(tp, PCI_EXPRESS);

		tp->pcie_readrq = 4096;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
			tp->pcie_readrq = 2048;

		pcie_set_readrq(tp->pdev, tp->pcie_readrq);

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5906) {
				tg3_flag_clear(tp, HW_TSO_2);
				tg3_flag_clear(tp, TSO_CAPABLE);
			}
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		/* BCM5785 devices are effectively PCIe devices, and should
		 * follow PCIe codepaths, but do not have a PCIe capabilities
		 * section.
		 */
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}
	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles.  We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !tg3_flag(tp, PCI_EXPRESS))
		tg3_flag_set(tp, MBOX_WRITE_REORDER);

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Important! -- It is critical that the PCI-X hw workaround
	 * situation is decided before the first MMIO register access.
	 */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tg3_flag_set(tp, PCI_HIGH_SPEED);
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tg3_flag_set(tp, PCI_32BIT);
	/* Chip-specific fixup from Broadcom driver */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}

	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
		tp->write32 = tg3_write_indirect_reg32;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
		 (tg3_flag(tp, PCI_EXPRESS) &&
		  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tg3_flag(tp, MBOX_WRITE_REORDER))
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, ICH_WORKAROUND)) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    (tg3_flag(tp, PCIX_MODE) &&
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
		tg3_flag_set(tp, SRAM_USE_CONFIG);
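	/* Accessor-selection sketch: tp->read32/tp->write32 and the mailbox
	 * variants are function pointers, so each erratum above swaps in a
	 * slower-but-safe implementation (indirect config-space access,
	 * read-back flushing, or the 5906-specific mailbox helpers) without
	 * touching the fast-path callers.
	 */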
	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tg3_flag(tp, PCIX_MODE)) {
		pci_read_config_dword(tp->pdev,
				      tp->pcix_cap + PCI_X_STATUS, &val);
		tp->pci_fn = val & 0x7;
	} else {
		tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
	}

	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);

		tg3_ape_lock_init(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, CPMU_PRESENT);
	/* Set up tp->grc_local_ctrl before calling
	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
	 * will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	/* Switch out of Vaux if it is a NIC */
	tg3_pwrsrc_switch_to_vmain(tp);
	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, JUMBO_RING_ENABLE);

	/* Determine WakeOnLan speed to use. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
		tg3_flag_clear(tp, WOL_SPEED_100MB);
	} else {
		tg3_flag_set(tp, WOL_SPEED_100MB);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
, 5705_PLUS
) &&
14191 !(tp
->phy_flags
& TG3_PHYFLG_IS_FET
) &&
14192 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5785
&&
14193 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_57780
&&
14194 !tg3_flag(tp
, 57765_PLUS
)) {
14195 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
14196 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
||
14197 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
14198 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
) {
14199 if (tp
->pdev
->device
!= PCI_DEVICE_ID_TIGON3_5756
&&
14200 tp
->pdev
->device
!= PCI_DEVICE_ID_TIGON3_5722
)
14201 tp
->phy_flags
|= TG3_PHYFLG_JITTER_BUG
;
14202 if (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5755M
)
14203 tp
->phy_flags
|= TG3_PHYFLG_ADJUST_TRIM
;
14205 tp
->phy_flags
|= TG3_PHYFLG_BER_BUG
;
14208 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
&&
14209 GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5784_AX
) {
14210 tp
->phy_otp
= tg3_read_otp_phycfg(tp
);
14211 if (tp
->phy_otp
== 0)
14212 tp
->phy_otp
= TG3_OTP_DEFAULT
;
14215 if (tg3_flag(tp
, CPMU_PRESENT
))
14216 tp
->mi_mode
= MAC_MI_MODE_500KHZ_CONST
;
14218 tp
->mi_mode
= MAC_MI_MODE_BASE
;
14220 tp
->coalesce_mode
= 0;
14221 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5700_AX
&&
14222 GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5700_BX
)
14223 tp
->coalesce_mode
|= HOSTCC_MODE_32BYTE
;
14225 /* Set these bits to enable statistics workaround. */
14226 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
||
14227 tp
->pci_chip_rev_id
== CHIPREV_ID_5719_A0
||
14228 tp
->pci_chip_rev_id
== CHIPREV_ID_5720_A0
) {
14229 tp
->coalesce_mode
|= HOSTCC_MODE_ATTN
;
14230 tp
->grc_mode
|= GRC_MODE_IRQ_ON_FLOW_ATTN
;
14233 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
||
14234 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
)
14235 tg3_flag_set(tp
, USE_PHYLIB
);
	err = tg3_mdio_init(tp);
	if (err)
		return err;

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
			GRC_MODE_WORD_SWAP_B2HRX_DATA |
			GRC_MODE_B2HRX_ENABLE |
			GRC_MODE_HTX2B_ENABLE |
			GRC_MODE_HOST_STACKUP);
	else
		val &= GRC_MODE_HOST_STACKUP;

	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);

		if (chiprevid == CHIPREV_ID_5701_A0 ||
		    chiprevid == CHIPREV_ID_5701_B0 ||
		    chiprevid == CHIPREV_ID_5701_B2 ||
		    chiprevid == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly.  If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
		}
	}

	tg3_nvram_init(tp);
);
14288 grc_misc_cfg
= tr32(GRC_MISC_CFG
);
14289 grc_misc_cfg
&= GRC_MISC_CFG_BOARD_ID_MASK
;
14291 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
14292 (grc_misc_cfg
== GRC_MISC_CFG_BOARD_ID_5788
||
14293 grc_misc_cfg
== GRC_MISC_CFG_BOARD_ID_5788M
))
14294 tg3_flag_set(tp
, IS_5788
);
14296 if (!tg3_flag(tp
, IS_5788
) &&
14297 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
)
14298 tg3_flag_set(tp
, TAGGED_STATUS
);
14299 if (tg3_flag(tp
, TAGGED_STATUS
)) {
14300 tp
->coalesce_mode
|= (HOSTCC_MODE_CLRTICK_RXBD
|
14301 HOSTCC_MODE_CLRTICK_TXBD
);
14303 tp
->misc_host_ctrl
|= MISC_HOST_CTRL_TAGGED_STATUS
;
14304 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
14305 tp
->misc_host_ctrl
);
14308 /* Preserve the APE MAC_MODE bits */
14309 if (tg3_flag(tp
, ENABLE_APE
))
14310 tp
->mac_mode
= MAC_MODE_APE_TX_EN
| MAC_MODE_APE_RX_EN
;
14312 tp
->mac_mode
= TG3_DEF_MAC_MODE
;
14314 /* these are limited to 10/100 only */
14315 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
&&
14316 (grc_misc_cfg
== 0x8000 || grc_misc_cfg
== 0x4000)) ||
14317 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
14318 tp
->pdev
->vendor
== PCI_VENDOR_ID_BROADCOM
&&
14319 (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5901
||
14320 tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5901_2
||
14321 tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5705F
)) ||
14322 (tp
->pdev
->vendor
== PCI_VENDOR_ID_BROADCOM
&&
14323 (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5751F
||
14324 tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5753F
||
14325 tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5787F
)) ||
14326 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57790
||
14327 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57791
||
14328 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57795
||
14329 (tp
->phy_flags
& TG3_PHYFLG_IS_FET
))
14330 tp
->phy_flags
|= TG3_PHYFLG_10_100_ONLY
;
	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
	}

	tg3_read_vpd(tp);
	tg3_read_fw_ver(tp);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
		tg3_flag_set(tp, USE_LINKCHG_REG);
	else
		tg3_flag_clear(tp, USE_LINKCHG_REG);

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tg3_flag_set(tp, USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);

	tp->rx_offset = NET_IP_ALIGN;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    tg3_flag(tp, PCIX_MODE)) {
		tp->rx_offset = 0;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}

	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tg3_flag(tp, ASPM_WORKAROUND))
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}
#ifdef CONFIG_SPARC
static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == 6) {
		memcpy(dev->dev_addr, addr, 6);
		memcpy(dev->perm_addr, dev->dev_addr, 6);
		return 0;
	}
	return -ENODEV;
}

static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
	return 0;
}
#endif
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
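/* Note on the 0x484b signature check above: the bootcode tags a valid
 * MAC-address mailbox by placing the ASCII bytes 'H' (0x48) and 'K'
 * (0x4b) in the upper half of the high word, so (hi >> 16) == 0x484b
 * distinguishes a populated mailbox from uninitialized SRAM.
 */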
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
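/* Worked example for the cache-line probe above: PCI_CACHE_LINE_SIZE is
 * reported in 32-bit words, so a register value of 0x10 (16 words) means
 * a 64-byte cache line (16 * 4), which selects the 64-byte DMA boundary
 * when a single-cacheline goal is in effect.
 */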
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf,
				     dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
	 * the *second* time the tg3 driver was getting loaded after an
	 * unload.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
#define TEST_BUFFER_SIZE	0x2000

static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}

	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
static char * __devinit tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}
static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
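
/* tg3_init_coal - seed tp->coal with the driver's default interrupt
 * coalescing parameters.  These are the values reported to user space
 * through the ETHTOOL_GCOALESCE command, i.e. what "ethtool -c ethX"
 * prints before anyone has tuned the device.
 */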
static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
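
/* tg3_netdev_ops wires the generic net_device entry points to their
 * tg3 implementations; the networking core invokes these for ifup,
 * ifdown, transmit, MTU changes, ioctls and feature toggles.
 */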
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
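
/* tg3_init_one is the PCI probe routine: it enables and maps the
 * device, reads out chip invariants, picks DMA masks and offload
 * features, sets up the per-vector mailboxes and finally registers the
 * net device.  Each failure path unwinds through the err_out_* labels
 * at the bottom in reverse order of acquisition.
 */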
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	u32 features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}
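
	/* Worked example of the mask selection above (illustrative, not
	 * exhaustive): a chip behind the EPB bridge, e.g. 5780 class,
	 * caps coherent (persistent) allocations at 40 bits; with
	 * CONFIG_HIGHMEM the streaming mask is still raised to 64 bits,
	 * and tg3_start_xmit() checks for buffers the chip can't reach.
	 */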

	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shut it down.
	 * The DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i < 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we set
		 * up above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);
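
	/* Illustrative sketch of the mailbox assignment above, assuming
	 * multiple MSI-X vectors: vector 0 keeps the values programmed
	 * on the first pass; each later vector takes the next interrupt
	 * mailbox (stride 0x8 for the first four, 0x4 after that) and
	 * the next rx return ring mailbox (stride 0x8), while the send
	 * mailbox alternates -0x4/+0xc, i.e. it advances by 0x8 every
	 * two vectors.
	 */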

	pci_set_drvdata(pdev, dev);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
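
/* tg3_remove_one undoes tg3_init_one in reverse: quiesce the reset
 * worker, detach from phylib where it was used, unregister the net
 * device and release the register mappings and PCI resources.
 */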
static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		if (tp->fw)
			release_firmware(tp->fw);

		cancel_work_sync(&tp->reset_task);

		/* Only tear down the PHY/MDIO state if phylib was
		 * actually attached; the mdio bus is registered only
		 * when USE_PHYLIB is set.
		 */
		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
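
/* Power management: tg3_suspend/tg3_resume are plugged into the PCI
 * core through the dev_pm_ops table built by SIMPLE_DEV_PM_OPS below,
 * so they run on system-wide suspend and resume transitions.
 */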
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	flush_work_sync(&tp->reset_task);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
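
/* tg3_resume reverses tg3_suspend: reattach the net device, reprogram
 * the hardware with tg3_restart_hw() and restart the driver timer; the
 * PHY is only restarted once the hardware has come back successfully.
 */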
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
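
/* PCI error recovery: the PCI core drives the three callbacks below in
 * sequence when a bus error is reported.  error_detected() quiesces
 * the driver, slot_reset() brings the device back after the slot has
 * been reset, and resume() restarts traffic once recovery succeeds.
 */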
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);
	tg3_flag_clear(tp, RESTART_TIMER);

	/* Want to make sure that the reset task doesn't run */
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	tg3_flag_clear(tp, RESTART_TIMER);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	tg3_full_unlock(tp);
	if (err) {
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}

static struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);
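
/* Usage note: once the driver is loaded, e.g. with "modprobe tg3", the
 * PCI core matches devices against tg3_pci_tbl and calls tg3_init_one()
 * for each one; unloading the module runs tg3_cleanup(), which
 * unregisters the driver and invokes tg3_remove_one() for every bound
 * device.
 */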