2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2011 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/ioport.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/ethtool.h>
35 #include <linux/mdio.h>
36 #include <linux/mii.h>
37 #include <linux/phy.h>
38 #include <linux/brcmphy.h>
39 #include <linux/if_vlan.h>
41 #include <linux/tcp.h>
42 #include <linux/workqueue.h>
43 #include <linux/prefetch.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/firmware.h>
47 #include <net/checksum.h>
50 #include <asm/system.h>
52 #include <asm/byteorder.h>
53 #include <linux/uaccess.h>
56 #include <asm/idprom.h>
65 /* Functions & macros to verify TG3_FLAGS types */
67 static inline int _tg3_flag(enum TG3_FLAGS flag
, unsigned long *bits
)
69 return test_bit(flag
, bits
);
/* Set @flag in the driver flag bitmap @bits (counterpart of the
 * clear helper that follows).
 * NOTE(review): the function body is missing from this extract —
 * presumably a single set_bit(flag, bits); confirm against the
 * complete source file.
 */
72 static inline void _tg3_flag_set(enum TG3_FLAGS flag
, unsigned long *bits
)
77 static inline void _tg3_flag_clear(enum TG3_FLAGS flag
, unsigned long *bits
)
79 clear_bit(flag
, bits
);
/* Convenience wrappers over the _tg3_flag helpers: the short flag name
 * given as @flag is token-pasted into the full TG3_FLAG_<flag> enum
 * value and tested/set/cleared in tp->tg3_flags.
 */
82 #define tg3_flag(tp, flag) \
83 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
84 #define tg3_flag_set(tp, flag) \
85 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
86 #define tg3_flag_clear(tp, flag) \
87 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
89 #define DRV_MODULE_NAME "tg3"
91 #define TG3_MIN_NUM 119
92 #define DRV_MODULE_VERSION \
93 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
94 #define DRV_MODULE_RELDATE "May 18, 2011"
96 #define TG3_DEF_MAC_MODE 0
97 #define TG3_DEF_RX_MODE 0
98 #define TG3_DEF_TX_MODE 0
99 #define TG3_DEF_MSG_ENABLE \
109 /* length of time before we decide the hardware is borked,
110 * and dev->tx_timeout() should be called to fix the problem
113 #define TG3_TX_TIMEOUT (5 * HZ)
115 /* hardware minimum and maximum for a single frame's data payload */
116 #define TG3_MIN_MTU 60
117 #define TG3_MAX_MTU(tp) \
118 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
120 /* These numbers seem to be hard coded in the NIC firmware somehow.
121 * You can't change the ring sizes, but you can change where you place
122 * them in the NIC onboard memory.
124 #define TG3_RX_STD_RING_SIZE(tp) \
125 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
126 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
127 #define TG3_DEF_RX_RING_PENDING 200
128 #define TG3_RX_JMB_RING_SIZE(tp) \
129 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
130 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
131 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
132 #define TG3_RSS_INDIR_TBL_SIZE 128
134 /* Do not place this n-ring entries value into the tp struct itself,
135 * we really want to expose these constants to GCC so that modulo et
136 * al. operations are done with shifts and masks instead of with
137 * hw multiply/modulo instructions. Another solution would be to
138 * replace things like '% foo' with '& (foo - 1)'.
141 #define TG3_TX_RING_SIZE 512
142 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
144 #define TG3_RX_STD_RING_BYTES(tp) \
145 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
146 #define TG3_RX_JMB_RING_BYTES(tp) \
147 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
148 #define TG3_RX_RCB_RING_BYTES(tp) \
149 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
150 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
152 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
154 #define TG3_DMA_BYTE_ENAB 64
156 #define TG3_RX_STD_DMA_SZ 1536
157 #define TG3_RX_JMB_DMA_SZ 9046
159 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
161 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
162 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
164 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
165 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
167 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
168 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
170 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
171 * that are at least dword aligned when used in PCIX mode. The driver
172 * works around this bug by double copying the packet. This workaround
173 * is built into the normal double copy length check for efficiency.
175 * However, the double copy is only necessary on those architectures
176 * where unaligned memory accesses are inefficient. For those architectures
177 * where unaligned memory accesses incur little penalty, we can reintegrate
178 * the 5701 in the normal rx path. Doing so saves a device structure
179 * dereference by hardcoding the double copy threshold in place.
181 #define TG3_RX_COPY_THRESHOLD 256
182 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
183 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
185 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
188 /* minimum number of free TX descriptors required to wake up TX process */
189 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
191 #define TG3_RAW_IP_ALIGN 2
193 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
195 #define FIRMWARE_TG3 "tigon/tg3.bin"
196 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
197 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
199 static char version
[] __devinitdata
=
200 DRV_MODULE_NAME
".c:v" DRV_MODULE_VERSION
" (" DRV_MODULE_RELDATE
")";
202 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
203 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
204 MODULE_LICENSE("GPL");
205 MODULE_VERSION(DRV_MODULE_VERSION
);
206 MODULE_FIRMWARE(FIRMWARE_TG3
);
207 MODULE_FIRMWARE(FIRMWARE_TG3TSO
);
208 MODULE_FIRMWARE(FIRMWARE_TG3TSO5
);
210 static int tg3_debug
= -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
211 module_param(tg3_debug
, int, 0);
212 MODULE_PARM_DESC(tg3_debug
, "Tigon3 bitmapped debugging message enable value");
214 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl
) = {
215 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5700
)},
216 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5701
)},
217 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702
)},
218 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5703
)},
219 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5704
)},
220 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702FE
)},
221 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705
)},
222 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705_2
)},
223 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705M
)},
224 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705M_2
)},
225 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702X
)},
226 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5703X
)},
227 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5704S
)},
228 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702A3
)},
229 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5703A3
)},
230 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5782
)},
231 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5788
)},
232 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5789
)},
233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5901
)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5901_2
)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5704S_2
)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705F
)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5721
)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5722
)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5751
)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5751M
)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5751F
)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5752
)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5752M
)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5753
)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5753M
)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5753F
)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5754
)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5754M
)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5755
)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5755M
)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5756
)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5786
)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5787
)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5787M
)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5787F
)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5714
)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5714S
)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5715
)},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5715S
)},
260 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5780
)},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5780S
)},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5781
)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5906
)},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5906M
)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5784
)},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5764
)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5723
)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5761
)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5761E
)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5761S
)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5761SE
)},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5785_G
)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5785_F
)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57780
)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57760
)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57790
)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57788
)},
278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5717
)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5718
)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57781
)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57785
)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57761
)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57765
)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57791
)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57795
)},
286 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5719
)},
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5720
)},
288 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT
, PCI_DEVICE_ID_SYSKONNECT_9DXX
)},
289 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT
, PCI_DEVICE_ID_SYSKONNECT_9MXX
)},
290 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC1000
)},
291 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC1001
)},
292 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC1003
)},
293 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC9100
)},
294 {PCI_DEVICE(PCI_VENDOR_ID_APPLE
, PCI_DEVICE_ID_APPLE_TIGON3
)},
298 MODULE_DEVICE_TABLE(pci
, tg3_pci_tbl
);
300 static const struct {
301 const char string
[ETH_GSTRING_LEN
];
302 } ethtool_stats_keys
[] = {
305 { "rx_ucast_packets" },
306 { "rx_mcast_packets" },
307 { "rx_bcast_packets" },
309 { "rx_align_errors" },
310 { "rx_xon_pause_rcvd" },
311 { "rx_xoff_pause_rcvd" },
312 { "rx_mac_ctrl_rcvd" },
313 { "rx_xoff_entered" },
314 { "rx_frame_too_long_errors" },
316 { "rx_undersize_packets" },
317 { "rx_in_length_errors" },
318 { "rx_out_length_errors" },
319 { "rx_64_or_less_octet_packets" },
320 { "rx_65_to_127_octet_packets" },
321 { "rx_128_to_255_octet_packets" },
322 { "rx_256_to_511_octet_packets" },
323 { "rx_512_to_1023_octet_packets" },
324 { "rx_1024_to_1522_octet_packets" },
325 { "rx_1523_to_2047_octet_packets" },
326 { "rx_2048_to_4095_octet_packets" },
327 { "rx_4096_to_8191_octet_packets" },
328 { "rx_8192_to_9022_octet_packets" },
335 { "tx_flow_control" },
337 { "tx_single_collisions" },
338 { "tx_mult_collisions" },
340 { "tx_excessive_collisions" },
341 { "tx_late_collisions" },
342 { "tx_collide_2times" },
343 { "tx_collide_3times" },
344 { "tx_collide_4times" },
345 { "tx_collide_5times" },
346 { "tx_collide_6times" },
347 { "tx_collide_7times" },
348 { "tx_collide_8times" },
349 { "tx_collide_9times" },
350 { "tx_collide_10times" },
351 { "tx_collide_11times" },
352 { "tx_collide_12times" },
353 { "tx_collide_13times" },
354 { "tx_collide_14times" },
355 { "tx_collide_15times" },
356 { "tx_ucast_packets" },
357 { "tx_mcast_packets" },
358 { "tx_bcast_packets" },
359 { "tx_carrier_sense_errors" },
363 { "dma_writeq_full" },
364 { "dma_write_prioq_full" },
368 { "rx_threshold_hit" },
370 { "dma_readq_full" },
371 { "dma_read_prioq_full" },
372 { "tx_comp_queue_full" },
374 { "ring_set_send_prod_index" },
375 { "ring_status_update" },
377 { "nic_avoided_irqs" },
378 { "nic_tx_threshold_hit" },
380 { "mbuf_lwm_thresh_hit" },
383 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
386 static const struct {
387 const char string
[ETH_GSTRING_LEN
];
388 } ethtool_test_keys
[] = {
389 { "nvram test (online) " },
390 { "link test (online) " },
391 { "register test (offline)" },
392 { "memory test (offline)" },
393 { "loopback test (offline)" },
394 { "interrupt test (offline)" },
397 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
400 static void tg3_write32(struct tg3
*tp
, u32 off
, u32 val
)
402 writel(val
, tp
->regs
+ off
);
405 static u32
tg3_read32(struct tg3
*tp
, u32 off
)
407 return readl(tp
->regs
+ off
);
410 static void tg3_ape_write32(struct tg3
*tp
, u32 off
, u32 val
)
412 writel(val
, tp
->aperegs
+ off
);
415 static u32
tg3_ape_read32(struct tg3
*tp
, u32 off
)
417 return readl(tp
->aperegs
+ off
);
420 static void tg3_write_indirect_reg32(struct tg3
*tp
, u32 off
, u32 val
)
424 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
425 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
);
426 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, val
);
427 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
430 static void tg3_write_flush_reg32(struct tg3
*tp
, u32 off
, u32 val
)
432 writel(val
, tp
->regs
+ off
);
433 readl(tp
->regs
+ off
);
/* Indirect register read via PCI config space: program the offset into
 * TG3PCI_REG_BASE_ADDR, read the value back through TG3PCI_REG_DATA.
 * indirect_lock serializes the two-step window access.
 * NOTE(review): the local declarations (val, flags) and the trailing
 * "return val;" are missing from this extract — confirm against the
 * complete source file.
 */
436 static u32
tg3_read_indirect_reg32(struct tg3
*tp
, u32 off
)
441 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
442 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
);
443 pci_read_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, &val
);
444 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
448 static void tg3_write_indirect_mbox(struct tg3
*tp
, u32 off
, u32 val
)
452 if (off
== (MAILBOX_RCVRET_CON_IDX_0
+ TG3_64BIT_REG_LOW
)) {
453 pci_write_config_dword(tp
->pdev
, TG3PCI_RCV_RET_RING_CON_IDX
+
454 TG3_64BIT_REG_LOW
, val
);
457 if (off
== TG3_RX_STD_PROD_IDX_REG
) {
458 pci_write_config_dword(tp
->pdev
, TG3PCI_STD_RING_PROD_IDX
+
459 TG3_64BIT_REG_LOW
, val
);
463 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
464 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
+ 0x5600);
465 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, val
);
466 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
468 /* In indirect mode when disabling interrupts, we also need
469 * to clear the interrupt bit in the GRC local ctrl register.
471 if ((off
== (MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
)) &&
473 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_LOCAL_CTRL
,
474 tp
->grc_local_ctrl
|GRC_LCLCTRL_CLEARINT
);
/* Indirect mailbox read: mailbox registers sit at 0x5600 above the
 * offset used for the config-space register window, hence the
 * "off + 0x5600" when programming TG3PCI_REG_BASE_ADDR.
 * NOTE(review): the local declarations (val, flags) and the trailing
 * "return val;" are missing from this extract — confirm against the
 * complete source file.
 */
478 static u32
tg3_read_indirect_mbox(struct tg3
*tp
, u32 off
)
483 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
484 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
+ 0x5600);
485 pci_read_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, &val
);
486 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
490 /* usec_wait specifies the wait time in usec when writing to certain registers
491 * where it is unsafe to read back the register without some delay.
492 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
493 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
495 static void _tw32_flush(struct tg3
*tp
, u32 off
, u32 val
, u32 usec_wait
)
497 if (tg3_flag(tp
, PCIX_TARGET_HWBUG
) || tg3_flag(tp
, ICH_WORKAROUND
))
498 /* Non-posted methods */
499 tp
->write32(tp
, off
, val
);
502 tg3_write32(tp
, off
, val
);
507 /* Wait again after the read for the posted method to guarantee that
508 * the wait time is met.
514 static inline void tw32_mailbox_flush(struct tg3
*tp
, u32 off
, u32 val
)
516 tp
->write32_mbox(tp
, off
, val
);
517 if (!tg3_flag(tp
, MBOX_WRITE_REORDER
) && !tg3_flag(tp
, ICH_WORKAROUND
))
518 tp
->read32_mbox(tp
, off
);
521 static void tg3_write32_tx_mbox(struct tg3
*tp
, u32 off
, u32 val
)
523 void __iomem
*mbox
= tp
->regs
+ off
;
525 if (tg3_flag(tp
, TXD_MBOX_HWBUG
))
527 if (tg3_flag(tp
, MBOX_WRITE_REORDER
))
531 static u32
tg3_read32_mbox_5906(struct tg3
*tp
, u32 off
)
533 return readl(tp
->regs
+ off
+ GRCMBOX_BASE
);
536 static void tg3_write32_mbox_5906(struct tg3
*tp
, u32 off
, u32 val
)
538 writel(val
, tp
->regs
+ off
+ GRCMBOX_BASE
);
/* Register/mailbox access shorthands.  All of these expand to calls
 * through the per-chip function pointers (or _tw32_flush) and assume a
 * local variable named "tp" (struct tg3 *) is in scope at the use site.
 * The _f variants flush the posted write; tw32_wait_f additionally
 * waits "us" microseconds for registers that are unsafe to read back
 * immediately.
 */
541 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
542 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
543 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
544 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
545 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
547 #define tw32(reg, val) tp->write32(tp, reg, val)
548 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
549 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
550 #define tr32(reg) tp->read32(tp, reg)
552 static void tg3_write_mem(struct tg3
*tp
, u32 off
, u32 val
)
556 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
&&
557 (off
>= NIC_SRAM_STATS_BLK
) && (off
< NIC_SRAM_TX_BUFFER_DESC
))
560 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
561 if (tg3_flag(tp
, SRAM_USE_CONFIG
)) {
562 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, off
);
563 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_DATA
, val
);
565 /* Always leave this as zero. */
566 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, 0);
568 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR
, off
);
569 tw32_f(TG3PCI_MEM_WIN_DATA
, val
);
571 /* Always leave this as zero. */
572 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
574 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
577 static void tg3_read_mem(struct tg3
*tp
, u32 off
, u32
*val
)
581 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
&&
582 (off
>= NIC_SRAM_STATS_BLK
) && (off
< NIC_SRAM_TX_BUFFER_DESC
)) {
587 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
588 if (tg3_flag(tp
, SRAM_USE_CONFIG
)) {
589 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, off
);
590 pci_read_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_DATA
, val
);
592 /* Always leave this as zero. */
593 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, 0);
595 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR
, off
);
596 *val
= tr32(TG3PCI_MEM_WIN_DATA
);
598 /* Always leave this as zero. */
599 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
601 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
604 static void tg3_ape_lock_init(struct tg3
*tp
)
609 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
)
610 regbase
= TG3_APE_LOCK_GRANT
;
612 regbase
= TG3_APE_PER_LOCK_GRANT
;
614 /* Make sure the driver hasn't any stale locks. */
615 for (i
= 0; i
< 8; i
++)
616 tg3_ape_write32(tp
, regbase
+ 4 * i
, APE_LOCK_GRANT_DRIVER
);
619 static int tg3_ape_lock(struct tg3
*tp
, int locknum
)
623 u32 status
, req
, gnt
;
625 if (!tg3_flag(tp
, ENABLE_APE
))
629 case TG3_APE_LOCK_GRC
:
630 case TG3_APE_LOCK_MEM
:
636 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
) {
637 req
= TG3_APE_LOCK_REQ
;
638 gnt
= TG3_APE_LOCK_GRANT
;
640 req
= TG3_APE_PER_LOCK_REQ
;
641 gnt
= TG3_APE_PER_LOCK_GRANT
;
646 tg3_ape_write32(tp
, req
+ off
, APE_LOCK_REQ_DRIVER
);
648 /* Wait for up to 1 millisecond to acquire lock. */
649 for (i
= 0; i
< 100; i
++) {
650 status
= tg3_ape_read32(tp
, gnt
+ off
);
651 if (status
== APE_LOCK_GRANT_DRIVER
)
656 if (status
!= APE_LOCK_GRANT_DRIVER
) {
657 /* Revoke the lock request. */
658 tg3_ape_write32(tp
, gnt
+ off
,
659 APE_LOCK_GRANT_DRIVER
);
667 static void tg3_ape_unlock(struct tg3
*tp
, int locknum
)
671 if (!tg3_flag(tp
, ENABLE_APE
))
675 case TG3_APE_LOCK_GRC
:
676 case TG3_APE_LOCK_MEM
:
682 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
)
683 gnt
= TG3_APE_LOCK_GRANT
;
685 gnt
= TG3_APE_PER_LOCK_GRANT
;
687 tg3_ape_write32(tp
, gnt
+ 4 * locknum
, APE_LOCK_GRANT_DRIVER
);
690 static void tg3_disable_ints(struct tg3
*tp
)
694 tw32(TG3PCI_MISC_HOST_CTRL
,
695 (tp
->misc_host_ctrl
| MISC_HOST_CTRL_MASK_PCI_INT
));
696 for (i
= 0; i
< tp
->irq_max
; i
++)
697 tw32_mailbox_f(tp
->napi
[i
].int_mbox
, 0x00000001);
700 static void tg3_enable_ints(struct tg3
*tp
)
707 tw32(TG3PCI_MISC_HOST_CTRL
,
708 (tp
->misc_host_ctrl
& ~MISC_HOST_CTRL_MASK_PCI_INT
));
710 tp
->coal_now
= tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
;
711 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
712 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
714 tw32_mailbox_f(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
715 if (tg3_flag(tp
, 1SHOT_MSI
))
716 tw32_mailbox_f(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
718 tp
->coal_now
|= tnapi
->coal_now
;
721 /* Force an initial interrupt */
722 if (!tg3_flag(tp
, TAGGED_STATUS
) &&
723 (tp
->napi
[0].hw_status
->status
& SD_STATUS_UPDATED
))
724 tw32(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
| GRC_LCLCTRL_SETINT
);
726 tw32(HOSTCC_MODE
, tp
->coal_now
);
728 tp
->coal_now
&= ~(tp
->napi
[0].coal_now
| tp
->napi
[1].coal_now
);
731 static inline unsigned int tg3_has_work(struct tg3_napi
*tnapi
)
733 struct tg3
*tp
= tnapi
->tp
;
734 struct tg3_hw_status
*sblk
= tnapi
->hw_status
;
735 unsigned int work_exists
= 0;
737 /* check for phy events */
738 if (!(tg3_flag(tp
, USE_LINKCHG_REG
) || tg3_flag(tp
, POLL_SERDES
))) {
739 if (sblk
->status
& SD_STATUS_LINK_CHG
)
742 /* check for RX/TX work to do */
743 if (sblk
->idx
[0].tx_consumer
!= tnapi
->tx_cons
||
744 *(tnapi
->rx_rcb_prod_idx
) != tnapi
->rx_rcb_ptr
)
751 * similar to tg3_enable_ints, but it accurately determines whether there
752 * is new work pending and can return without flushing the PIO write
753 * which reenables interrupts
755 static void tg3_int_reenable(struct tg3_napi
*tnapi
)
757 struct tg3
*tp
= tnapi
->tp
;
759 tw32_mailbox(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
762 /* When doing tagged status, this work check is unnecessary.
763 * The last_tag we write above tells the chip which piece of
764 * work we've completed.
766 if (!tg3_flag(tp
, TAGGED_STATUS
) && tg3_has_work(tnapi
))
767 tw32(HOSTCC_MODE
, tp
->coalesce_mode
|
768 HOSTCC_MODE_ENABLE
| tnapi
->coal_now
);
771 static void tg3_switch_clocks(struct tg3
*tp
)
776 if (tg3_flag(tp
, CPMU_PRESENT
) || tg3_flag(tp
, 5780_CLASS
))
779 clock_ctrl
= tr32(TG3PCI_CLOCK_CTRL
);
781 orig_clock_ctrl
= clock_ctrl
;
782 clock_ctrl
&= (CLOCK_CTRL_FORCE_CLKRUN
|
783 CLOCK_CTRL_CLKRUN_OENABLE
|
785 tp
->pci_clock_ctrl
= clock_ctrl
;
787 if (tg3_flag(tp
, 5705_PLUS
)) {
788 if (orig_clock_ctrl
& CLOCK_CTRL_625_CORE
) {
789 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
790 clock_ctrl
| CLOCK_CTRL_625_CORE
, 40);
792 } else if ((orig_clock_ctrl
& CLOCK_CTRL_44MHZ_CORE
) != 0) {
793 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
795 (CLOCK_CTRL_44MHZ_CORE
| CLOCK_CTRL_ALTCLK
),
797 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
798 clock_ctrl
| (CLOCK_CTRL_ALTCLK
),
801 tw32_wait_f(TG3PCI_CLOCK_CTRL
, clock_ctrl
, 40);
804 #define PHY_BUSY_LOOPS 5000
806 static int tg3_readphy(struct tg3
*tp
, int reg
, u32
*val
)
812 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
814 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
820 frame_val
= ((tp
->phy_addr
<< MI_COM_PHY_ADDR_SHIFT
) &
821 MI_COM_PHY_ADDR_MASK
);
822 frame_val
|= ((reg
<< MI_COM_REG_ADDR_SHIFT
) &
823 MI_COM_REG_ADDR_MASK
);
824 frame_val
|= (MI_COM_CMD_READ
| MI_COM_START
);
826 tw32_f(MAC_MI_COM
, frame_val
);
828 loops
= PHY_BUSY_LOOPS
;
831 frame_val
= tr32(MAC_MI_COM
);
833 if ((frame_val
& MI_COM_BUSY
) == 0) {
835 frame_val
= tr32(MAC_MI_COM
);
843 *val
= frame_val
& MI_COM_DATA_MASK
;
847 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
848 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
855 static int tg3_writephy(struct tg3
*tp
, int reg
, u32 val
)
861 if ((tp
->phy_flags
& TG3_PHYFLG_IS_FET
) &&
862 (reg
== MII_TG3_CTRL
|| reg
== MII_TG3_AUX_CTRL
))
865 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
867 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
871 frame_val
= ((tp
->phy_addr
<< MI_COM_PHY_ADDR_SHIFT
) &
872 MI_COM_PHY_ADDR_MASK
);
873 frame_val
|= ((reg
<< MI_COM_REG_ADDR_SHIFT
) &
874 MI_COM_REG_ADDR_MASK
);
875 frame_val
|= (val
& MI_COM_DATA_MASK
);
876 frame_val
|= (MI_COM_CMD_WRITE
| MI_COM_START
);
878 tw32_f(MAC_MI_COM
, frame_val
);
880 loops
= PHY_BUSY_LOOPS
;
883 frame_val
= tr32(MAC_MI_COM
);
884 if ((frame_val
& MI_COM_BUSY
) == 0) {
886 frame_val
= tr32(MAC_MI_COM
);
896 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
897 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
904 static int tg3_phy_cl45_write(struct tg3
*tp
, u32 devad
, u32 addr
, u32 val
)
908 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
, devad
);
912 err
= tg3_writephy(tp
, MII_TG3_MMD_ADDRESS
, addr
);
916 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
,
917 MII_TG3_MMD_CTRL_DATA_NOINC
| devad
);
921 err
= tg3_writephy(tp
, MII_TG3_MMD_ADDRESS
, val
);
927 static int tg3_phy_cl45_read(struct tg3
*tp
, u32 devad
, u32 addr
, u32
*val
)
931 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
, devad
);
935 err
= tg3_writephy(tp
, MII_TG3_MMD_ADDRESS
, addr
);
939 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
,
940 MII_TG3_MMD_CTRL_DATA_NOINC
| devad
);
944 err
= tg3_readphy(tp
, MII_TG3_MMD_ADDRESS
, val
);
/* Read a PHY DSP register: select the DSP register via
 * MII_TG3_DSP_ADDRESS, then read the data through
 * MII_TG3_DSP_RW_PORT into *val.
 * NOTE(review): the "int err;" declaration, the error check between
 * the two accesses, and the final "return err;" are missing from this
 * extract — confirm against the complete source file.
 */
950 static int tg3_phydsp_read(struct tg3
*tp
, u32 reg
, u32
*val
)
954 err
= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, reg
);
956 err
= tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, val
);
/* Write a PHY DSP register: select the DSP register via
 * MII_TG3_DSP_ADDRESS, then write the data through
 * MII_TG3_DSP_RW_PORT.
 * NOTE(review): the "int err;" declaration, the error check between
 * the two accesses, and the final "return err;" are missing from this
 * extract — confirm against the complete source file.
 */
961 static int tg3_phydsp_write(struct tg3
*tp
, u32 reg
, u32 val
)
965 err
= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, reg
);
967 err
= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, val
);
/* Read a shadow register of the PHY aux-control block: write the
 * shadow-register selector (reg shifted into the RDSEL field, with the
 * MISC shadow selected) to MII_TG3_AUX_CTRL, then read the value back
 * from the same MII register into *val.
 * NOTE(review): the "int err;" declaration, the error check between
 * the two accesses, and the final "return err;" are missing from this
 * extract — confirm against the complete source file.
 */
972 static int tg3_phy_auxctl_read(struct tg3
*tp
, int reg
, u32
*val
)
976 err
= tg3_writephy(tp
, MII_TG3_AUX_CTRL
,
977 (reg
<< MII_TG3_AUXCTL_MISC_RDSEL_SHIFT
) |
978 MII_TG3_AUXCTL_SHDWSEL_MISC
);
980 err
= tg3_readphy(tp
, MII_TG3_AUX_CTRL
, val
);
985 static int tg3_phy_auxctl_write(struct tg3
*tp
, int reg
, u32 set
)
987 if (reg
== MII_TG3_AUXCTL_SHDWSEL_MISC
)
988 set
|= MII_TG3_AUXCTL_MISC_WREN
;
990 return tg3_writephy(tp
, MII_TG3_AUX_CTRL
, set
| reg
);
/* Enable/disable the PHY's SM DSP via the AUXCTL shadow register.
 * Both write the TX 6dB coding bit; ENABLE additionally sets the
 * SMDSP enable bit.
 * NOTE(review): the DISABLE macro expansion ends with a stray
 * semicolon, so "TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);" expands to a
 * double semicolon and the macro cannot be used as a bare expression —
 * harmless as currently used, but worth cleaning up at the definition.
 */
993 #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
994 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
995 MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
996 MII_TG3_AUXCTL_ACTL_TX_6DB)
998 #define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
999 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1000 MII_TG3_AUXCTL_ACTL_TX_6DB);
/* Reset the PHY through the MII BMCR register: write BMCR_RESET, then
 * poll BMCR until the self-clearing reset bit drops.
 * NOTE(review): the local declarations, the polling loop around the
 * tg3_readphy() call, the timeout limit, and the return statements are
 * missing from this extract — confirm against the complete source
 * file before relying on the exact retry/timeout behavior.
 */
1002 static int tg3_bmcr_reset(struct tg3
*tp
)
1007 /* OK, reset it, and poll the BMCR_RESET bit until it
1008 * clears or we time out.
1010 phy_control
= BMCR_RESET
;
1011 err
= tg3_writephy(tp
, MII_BMCR
, phy_control
);
1017 err
= tg3_readphy(tp
, MII_BMCR
, &phy_control
);
1021 if ((phy_control
& BMCR_RESET
) == 0) {
1033 static int tg3_mdio_read(struct mii_bus
*bp
, int mii_id
, int reg
)
1035 struct tg3
*tp
= bp
->priv
;
1038 spin_lock_bh(&tp
->lock
);
1040 if (tg3_readphy(tp
, reg
, &val
))
1043 spin_unlock_bh(&tp
->lock
);
1048 static int tg3_mdio_write(struct mii_bus
*bp
, int mii_id
, int reg
, u16 val
)
1050 struct tg3
*tp
= bp
->priv
;
1053 spin_lock_bh(&tp
->lock
);
1055 if (tg3_writephy(tp
, reg
, val
))
1058 spin_unlock_bh(&tp
->lock
);
/* mii_bus reset callback registered in tg3_mdio_init.
 * NOTE(review): the function body is missing from this extract —
 * presumably a stub that returns 0 (the hardware reset is handled
 * elsewhere); confirm against the complete source file.
 */
1063 static int tg3_mdio_reset(struct mii_bus
*bp
)
1068 static void tg3_mdio_config_5785(struct tg3
*tp
)
1071 struct phy_device
*phydev
;
1073 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1074 switch (phydev
->drv
->phy_id
& phydev
->drv
->phy_id_mask
) {
1075 case PHY_ID_BCM50610
:
1076 case PHY_ID_BCM50610M
:
1077 val
= MAC_PHYCFG2_50610_LED_MODES
;
1079 case PHY_ID_BCMAC131
:
1080 val
= MAC_PHYCFG2_AC131_LED_MODES
;
1082 case PHY_ID_RTL8211C
:
1083 val
= MAC_PHYCFG2_RTL8211C_LED_MODES
;
1085 case PHY_ID_RTL8201E
:
1086 val
= MAC_PHYCFG2_RTL8201E_LED_MODES
;
1092 if (phydev
->interface
!= PHY_INTERFACE_MODE_RGMII
) {
1093 tw32(MAC_PHYCFG2
, val
);
1095 val
= tr32(MAC_PHYCFG1
);
1096 val
&= ~(MAC_PHYCFG1_RGMII_INT
|
1097 MAC_PHYCFG1_RXCLK_TO_MASK
| MAC_PHYCFG1_TXCLK_TO_MASK
);
1098 val
|= MAC_PHYCFG1_RXCLK_TIMEOUT
| MAC_PHYCFG1_TXCLK_TIMEOUT
;
1099 tw32(MAC_PHYCFG1
, val
);
1104 if (!tg3_flag(tp
, RGMII_INBAND_DISABLE
))
1105 val
|= MAC_PHYCFG2_EMODE_MASK_MASK
|
1106 MAC_PHYCFG2_FMODE_MASK_MASK
|
1107 MAC_PHYCFG2_GMODE_MASK_MASK
|
1108 MAC_PHYCFG2_ACT_MASK_MASK
|
1109 MAC_PHYCFG2_QUAL_MASK_MASK
|
1110 MAC_PHYCFG2_INBAND_ENABLE
;
1112 tw32(MAC_PHYCFG2
, val
);
1114 val
= tr32(MAC_PHYCFG1
);
1115 val
&= ~(MAC_PHYCFG1_RXCLK_TO_MASK
| MAC_PHYCFG1_TXCLK_TO_MASK
|
1116 MAC_PHYCFG1_RGMII_EXT_RX_DEC
| MAC_PHYCFG1_RGMII_SND_STAT_EN
);
1117 if (!tg3_flag(tp
, RGMII_INBAND_DISABLE
)) {
1118 if (tg3_flag(tp
, RGMII_EXT_IBND_RX_EN
))
1119 val
|= MAC_PHYCFG1_RGMII_EXT_RX_DEC
;
1120 if (tg3_flag(tp
, RGMII_EXT_IBND_TX_EN
))
1121 val
|= MAC_PHYCFG1_RGMII_SND_STAT_EN
;
1123 val
|= MAC_PHYCFG1_RXCLK_TIMEOUT
| MAC_PHYCFG1_TXCLK_TIMEOUT
|
1124 MAC_PHYCFG1_RGMII_INT
| MAC_PHYCFG1_TXC_DRV
;
1125 tw32(MAC_PHYCFG1
, val
);
1127 val
= tr32(MAC_EXT_RGMII_MODE
);
1128 val
&= ~(MAC_RGMII_MODE_RX_INT_B
|
1129 MAC_RGMII_MODE_RX_QUALITY
|
1130 MAC_RGMII_MODE_RX_ACTIVITY
|
1131 MAC_RGMII_MODE_RX_ENG_DET
|
1132 MAC_RGMII_MODE_TX_ENABLE
|
1133 MAC_RGMII_MODE_TX_LOWPWR
|
1134 MAC_RGMII_MODE_TX_RESET
);
1135 if (!tg3_flag(tp
, RGMII_INBAND_DISABLE
)) {
1136 if (tg3_flag(tp
, RGMII_EXT_IBND_RX_EN
))
1137 val
|= MAC_RGMII_MODE_RX_INT_B
|
1138 MAC_RGMII_MODE_RX_QUALITY
|
1139 MAC_RGMII_MODE_RX_ACTIVITY
|
1140 MAC_RGMII_MODE_RX_ENG_DET
;
1141 if (tg3_flag(tp
, RGMII_EXT_IBND_TX_EN
))
1142 val
|= MAC_RGMII_MODE_TX_ENABLE
|
1143 MAC_RGMII_MODE_TX_LOWPWR
|
1144 MAC_RGMII_MODE_TX_RESET
;
1146 tw32(MAC_EXT_RGMII_MODE
, val
);
1149 static void tg3_mdio_start(struct tg3
*tp
)
1151 tp
->mi_mode
&= ~MAC_MI_MODE_AUTO_POLL
;
1152 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
1155 if (tg3_flag(tp
, MDIOBUS_INITED
) &&
1156 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
)
1157 tg3_mdio_config_5785(tp
);
1160 static int tg3_mdio_init(struct tg3
*tp
)
1164 struct phy_device
*phydev
;
1166 if (tg3_flag(tp
, 5717_PLUS
)) {
1169 tp
->phy_addr
= PCI_FUNC(tp
->pdev
->devfn
) + 1;
1171 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5717_A0
)
1172 is_serdes
= tr32(SG_DIG_STATUS
) & SG_DIG_IS_SERDES
;
1174 is_serdes
= tr32(TG3_CPMU_PHY_STRAP
) &
1175 TG3_CPMU_PHY_STRAP_IS_SERDES
;
1179 tp
->phy_addr
= TG3_PHY_MII_ADDR
;
1183 if (!tg3_flag(tp
, USE_PHYLIB
) || tg3_flag(tp
, MDIOBUS_INITED
))
1186 tp
->mdio_bus
= mdiobus_alloc();
1187 if (tp
->mdio_bus
== NULL
)
1190 tp
->mdio_bus
->name
= "tg3 mdio bus";
1191 snprintf(tp
->mdio_bus
->id
, MII_BUS_ID_SIZE
, "%x",
1192 (tp
->pdev
->bus
->number
<< 8) | tp
->pdev
->devfn
);
1193 tp
->mdio_bus
->priv
= tp
;
1194 tp
->mdio_bus
->parent
= &tp
->pdev
->dev
;
1195 tp
->mdio_bus
->read
= &tg3_mdio_read
;
1196 tp
->mdio_bus
->write
= &tg3_mdio_write
;
1197 tp
->mdio_bus
->reset
= &tg3_mdio_reset
;
1198 tp
->mdio_bus
->phy_mask
= ~(1 << TG3_PHY_MII_ADDR
);
1199 tp
->mdio_bus
->irq
= &tp
->mdio_irq
[0];
1201 for (i
= 0; i
< PHY_MAX_ADDR
; i
++)
1202 tp
->mdio_bus
->irq
[i
] = PHY_POLL
;
1204 /* The bus registration will look for all the PHYs on the mdio bus.
1205 * Unfortunately, it does not ensure the PHY is powered up before
1206 * accessing the PHY ID registers. A chip reset is the
1207 * quickest way to bring the device back to an operational state..
1209 if (tg3_readphy(tp
, MII_BMCR
, ®
) || (reg
& BMCR_PDOWN
))
1212 i
= mdiobus_register(tp
->mdio_bus
);
1214 dev_warn(&tp
->pdev
->dev
, "mdiobus_reg failed (0x%x)\n", i
);
1215 mdiobus_free(tp
->mdio_bus
);
1219 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1221 if (!phydev
|| !phydev
->drv
) {
1222 dev_warn(&tp
->pdev
->dev
, "No PHY devices\n");
1223 mdiobus_unregister(tp
->mdio_bus
);
1224 mdiobus_free(tp
->mdio_bus
);
1228 switch (phydev
->drv
->phy_id
& phydev
->drv
->phy_id_mask
) {
1229 case PHY_ID_BCM57780
:
1230 phydev
->interface
= PHY_INTERFACE_MODE_GMII
;
1231 phydev
->dev_flags
|= PHY_BRCM_AUTO_PWRDWN_ENABLE
;
1233 case PHY_ID_BCM50610
:
1234 case PHY_ID_BCM50610M
:
1235 phydev
->dev_flags
|= PHY_BRCM_CLEAR_RGMII_MODE
|
1236 PHY_BRCM_RX_REFCLK_UNUSED
|
1237 PHY_BRCM_DIS_TXCRXC_NOENRGY
|
1238 PHY_BRCM_AUTO_PWRDWN_ENABLE
;
1239 if (tg3_flag(tp
, RGMII_INBAND_DISABLE
))
1240 phydev
->dev_flags
|= PHY_BRCM_STD_IBND_DISABLE
;
1241 if (tg3_flag(tp
, RGMII_EXT_IBND_RX_EN
))
1242 phydev
->dev_flags
|= PHY_BRCM_EXT_IBND_RX_ENABLE
;
1243 if (tg3_flag(tp
, RGMII_EXT_IBND_TX_EN
))
1244 phydev
->dev_flags
|= PHY_BRCM_EXT_IBND_TX_ENABLE
;
1246 case PHY_ID_RTL8211C
:
1247 phydev
->interface
= PHY_INTERFACE_MODE_RGMII
;
1249 case PHY_ID_RTL8201E
:
1250 case PHY_ID_BCMAC131
:
1251 phydev
->interface
= PHY_INTERFACE_MODE_MII
;
1252 phydev
->dev_flags
|= PHY_BRCM_AUTO_PWRDWN_ENABLE
;
1253 tp
->phy_flags
|= TG3_PHYFLG_IS_FET
;
1257 tg3_flag_set(tp
, MDIOBUS_INITED
);
1259 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
)
1260 tg3_mdio_config_5785(tp
);
1265 static void tg3_mdio_fini(struct tg3
*tp
)
1267 if (tg3_flag(tp
, MDIOBUS_INITED
)) {
1268 tg3_flag_clear(tp
, MDIOBUS_INITED
);
1269 mdiobus_unregister(tp
->mdio_bus
);
1270 mdiobus_free(tp
->mdio_bus
);
1274 /* tp->lock is held. */
1275 static inline void tg3_generate_fw_event(struct tg3
*tp
)
1279 val
= tr32(GRC_RX_CPU_EVENT
);
1280 val
|= GRC_RX_CPU_DRIVER_EVENT
;
1281 tw32_f(GRC_RX_CPU_EVENT
, val
);
1283 tp
->last_event_jiffies
= jiffies
;
1286 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1288 /* tp->lock is held. */
1289 static void tg3_wait_for_event_ack(struct tg3
*tp
)
1292 unsigned int delay_cnt
;
1295 /* If enough time has passed, no wait is necessary. */
1296 time_remain
= (long)(tp
->last_event_jiffies
+ 1 +
1297 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC
)) -
1299 if (time_remain
< 0)
1302 /* Check if we can shorten the wait time. */
1303 delay_cnt
= jiffies_to_usecs(time_remain
);
1304 if (delay_cnt
> TG3_FW_EVENT_TIMEOUT_USEC
)
1305 delay_cnt
= TG3_FW_EVENT_TIMEOUT_USEC
;
1306 delay_cnt
= (delay_cnt
>> 3) + 1;
1308 for (i
= 0; i
< delay_cnt
; i
++) {
1309 if (!(tr32(GRC_RX_CPU_EVENT
) & GRC_RX_CPU_DRIVER_EVENT
))
1315 /* tp->lock is held. */
1316 static void tg3_ump_link_report(struct tg3
*tp
)
1321 if (!tg3_flag(tp
, 5780_CLASS
) || !tg3_flag(tp
, ENABLE_ASF
))
1324 tg3_wait_for_event_ack(tp
);
1326 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
, FWCMD_NICDRV_LINK_UPDATE
);
1328 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_LEN_MBOX
, 14);
1331 if (!tg3_readphy(tp
, MII_BMCR
, ®
))
1333 if (!tg3_readphy(tp
, MII_BMSR
, ®
))
1334 val
|= (reg
& 0xffff);
1335 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
, val
);
1338 if (!tg3_readphy(tp
, MII_ADVERTISE
, ®
))
1340 if (!tg3_readphy(tp
, MII_LPA
, ®
))
1341 val
|= (reg
& 0xffff);
1342 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 4, val
);
1345 if (!(tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)) {
1346 if (!tg3_readphy(tp
, MII_CTRL1000
, ®
))
1348 if (!tg3_readphy(tp
, MII_STAT1000
, ®
))
1349 val
|= (reg
& 0xffff);
1351 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 8, val
);
1353 if (!tg3_readphy(tp
, MII_PHYADDR
, ®
))
1357 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 12, val
);
1359 tg3_generate_fw_event(tp
);
1362 static void tg3_link_report(struct tg3
*tp
)
1364 if (!netif_carrier_ok(tp
->dev
)) {
1365 netif_info(tp
, link
, tp
->dev
, "Link is down\n");
1366 tg3_ump_link_report(tp
);
1367 } else if (netif_msg_link(tp
)) {
1368 netdev_info(tp
->dev
, "Link is up at %d Mbps, %s duplex\n",
1369 (tp
->link_config
.active_speed
== SPEED_1000
?
1371 (tp
->link_config
.active_speed
== SPEED_100
?
1373 (tp
->link_config
.active_duplex
== DUPLEX_FULL
?
1376 netdev_info(tp
->dev
, "Flow control is %s for TX and %s for RX\n",
1377 (tp
->link_config
.active_flowctrl
& FLOW_CTRL_TX
) ?
1379 (tp
->link_config
.active_flowctrl
& FLOW_CTRL_RX
) ?
1382 if (tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
)
1383 netdev_info(tp
->dev
, "EEE is %s\n",
1384 tp
->setlpicnt
? "enabled" : "disabled");
1386 tg3_ump_link_report(tp
);
1390 static u16
tg3_advert_flowctrl_1000T(u8 flow_ctrl
)
1394 if ((flow_ctrl
& FLOW_CTRL_TX
) && (flow_ctrl
& FLOW_CTRL_RX
))
1395 miireg
= ADVERTISE_PAUSE_CAP
;
1396 else if (flow_ctrl
& FLOW_CTRL_TX
)
1397 miireg
= ADVERTISE_PAUSE_ASYM
;
1398 else if (flow_ctrl
& FLOW_CTRL_RX
)
1399 miireg
= ADVERTISE_PAUSE_CAP
| ADVERTISE_PAUSE_ASYM
;
1406 static u16
tg3_advert_flowctrl_1000X(u8 flow_ctrl
)
1410 if ((flow_ctrl
& FLOW_CTRL_TX
) && (flow_ctrl
& FLOW_CTRL_RX
))
1411 miireg
= ADVERTISE_1000XPAUSE
;
1412 else if (flow_ctrl
& FLOW_CTRL_TX
)
1413 miireg
= ADVERTISE_1000XPSE_ASYM
;
1414 else if (flow_ctrl
& FLOW_CTRL_RX
)
1415 miireg
= ADVERTISE_1000XPAUSE
| ADVERTISE_1000XPSE_ASYM
;
1422 static u8
tg3_resolve_flowctrl_1000X(u16 lcladv
, u16 rmtadv
)
1426 if (lcladv
& ADVERTISE_1000XPAUSE
) {
1427 if (lcladv
& ADVERTISE_1000XPSE_ASYM
) {
1428 if (rmtadv
& LPA_1000XPAUSE
)
1429 cap
= FLOW_CTRL_TX
| FLOW_CTRL_RX
;
1430 else if (rmtadv
& LPA_1000XPAUSE_ASYM
)
1433 if (rmtadv
& LPA_1000XPAUSE
)
1434 cap
= FLOW_CTRL_TX
| FLOW_CTRL_RX
;
1436 } else if (lcladv
& ADVERTISE_1000XPSE_ASYM
) {
1437 if ((rmtadv
& LPA_1000XPAUSE
) && (rmtadv
& LPA_1000XPAUSE_ASYM
))
1444 static void tg3_setup_flow_control(struct tg3
*tp
, u32 lcladv
, u32 rmtadv
)
1448 u32 old_rx_mode
= tp
->rx_mode
;
1449 u32 old_tx_mode
= tp
->tx_mode
;
1451 if (tg3_flag(tp
, USE_PHYLIB
))
1452 autoneg
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]->autoneg
;
1454 autoneg
= tp
->link_config
.autoneg
;
1456 if (autoneg
== AUTONEG_ENABLE
&& tg3_flag(tp
, PAUSE_AUTONEG
)) {
1457 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)
1458 flowctrl
= tg3_resolve_flowctrl_1000X(lcladv
, rmtadv
);
1460 flowctrl
= mii_resolve_flowctrl_fdx(lcladv
, rmtadv
);
1462 flowctrl
= tp
->link_config
.flowctrl
;
1464 tp
->link_config
.active_flowctrl
= flowctrl
;
1466 if (flowctrl
& FLOW_CTRL_RX
)
1467 tp
->rx_mode
|= RX_MODE_FLOW_CTRL_ENABLE
;
1469 tp
->rx_mode
&= ~RX_MODE_FLOW_CTRL_ENABLE
;
1471 if (old_rx_mode
!= tp
->rx_mode
)
1472 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
1474 if (flowctrl
& FLOW_CTRL_TX
)
1475 tp
->tx_mode
|= TX_MODE_FLOW_CTRL_ENABLE
;
1477 tp
->tx_mode
&= ~TX_MODE_FLOW_CTRL_ENABLE
;
1479 if (old_tx_mode
!= tp
->tx_mode
)
1480 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
1483 static void tg3_adjust_link(struct net_device
*dev
)
1485 u8 oldflowctrl
, linkmesg
= 0;
1486 u32 mac_mode
, lcl_adv
, rmt_adv
;
1487 struct tg3
*tp
= netdev_priv(dev
);
1488 struct phy_device
*phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1490 spin_lock_bh(&tp
->lock
);
1492 mac_mode
= tp
->mac_mode
& ~(MAC_MODE_PORT_MODE_MASK
|
1493 MAC_MODE_HALF_DUPLEX
);
1495 oldflowctrl
= tp
->link_config
.active_flowctrl
;
1501 if (phydev
->speed
== SPEED_100
|| phydev
->speed
== SPEED_10
)
1502 mac_mode
|= MAC_MODE_PORT_MODE_MII
;
1503 else if (phydev
->speed
== SPEED_1000
||
1504 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5785
)
1505 mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
1507 mac_mode
|= MAC_MODE_PORT_MODE_MII
;
1509 if (phydev
->duplex
== DUPLEX_HALF
)
1510 mac_mode
|= MAC_MODE_HALF_DUPLEX
;
1512 lcl_adv
= tg3_advert_flowctrl_1000T(
1513 tp
->link_config
.flowctrl
);
1516 rmt_adv
= LPA_PAUSE_CAP
;
1517 if (phydev
->asym_pause
)
1518 rmt_adv
|= LPA_PAUSE_ASYM
;
1521 tg3_setup_flow_control(tp
, lcl_adv
, rmt_adv
);
1523 mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
1525 if (mac_mode
!= tp
->mac_mode
) {
1526 tp
->mac_mode
= mac_mode
;
1527 tw32_f(MAC_MODE
, tp
->mac_mode
);
1531 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
) {
1532 if (phydev
->speed
== SPEED_10
)
1534 MAC_MI_STAT_10MBPS_MODE
|
1535 MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
1537 tw32(MAC_MI_STAT
, MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
1540 if (phydev
->speed
== SPEED_1000
&& phydev
->duplex
== DUPLEX_HALF
)
1541 tw32(MAC_TX_LENGTHS
,
1542 ((2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
1543 (6 << TX_LENGTHS_IPG_SHIFT
) |
1544 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT
)));
1546 tw32(MAC_TX_LENGTHS
,
1547 ((2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
1548 (6 << TX_LENGTHS_IPG_SHIFT
) |
1549 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
)));
1551 if ((phydev
->link
&& tp
->link_config
.active_speed
== SPEED_INVALID
) ||
1552 (!phydev
->link
&& tp
->link_config
.active_speed
!= SPEED_INVALID
) ||
1553 phydev
->speed
!= tp
->link_config
.active_speed
||
1554 phydev
->duplex
!= tp
->link_config
.active_duplex
||
1555 oldflowctrl
!= tp
->link_config
.active_flowctrl
)
1558 tp
->link_config
.active_speed
= phydev
->speed
;
1559 tp
->link_config
.active_duplex
= phydev
->duplex
;
1561 spin_unlock_bh(&tp
->lock
);
1564 tg3_link_report(tp
);
1567 static int tg3_phy_init(struct tg3
*tp
)
1569 struct phy_device
*phydev
;
1571 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
)
1574 /* Bring the PHY back to a known state. */
1577 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1579 /* Attach the MAC to the PHY. */
1580 phydev
= phy_connect(tp
->dev
, dev_name(&phydev
->dev
), tg3_adjust_link
,
1581 phydev
->dev_flags
, phydev
->interface
);
1582 if (IS_ERR(phydev
)) {
1583 dev_err(&tp
->pdev
->dev
, "Could not attach to PHY\n");
1584 return PTR_ERR(phydev
);
1587 /* Mask with MAC supported features. */
1588 switch (phydev
->interface
) {
1589 case PHY_INTERFACE_MODE_GMII
:
1590 case PHY_INTERFACE_MODE_RGMII
:
1591 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)) {
1592 phydev
->supported
&= (PHY_GBIT_FEATURES
|
1594 SUPPORTED_Asym_Pause
);
1598 case PHY_INTERFACE_MODE_MII
:
1599 phydev
->supported
&= (PHY_BASIC_FEATURES
|
1601 SUPPORTED_Asym_Pause
);
1604 phy_disconnect(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
1608 tp
->phy_flags
|= TG3_PHYFLG_IS_CONNECTED
;
1610 phydev
->advertising
= phydev
->supported
;
1615 static void tg3_phy_start(struct tg3
*tp
)
1617 struct phy_device
*phydev
;
1619 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
1622 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1624 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) {
1625 tp
->phy_flags
&= ~TG3_PHYFLG_IS_LOW_POWER
;
1626 phydev
->speed
= tp
->link_config
.orig_speed
;
1627 phydev
->duplex
= tp
->link_config
.orig_duplex
;
1628 phydev
->autoneg
= tp
->link_config
.orig_autoneg
;
1629 phydev
->advertising
= tp
->link_config
.orig_advertising
;
1634 phy_start_aneg(phydev
);
1637 static void tg3_phy_stop(struct tg3
*tp
)
1639 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
1642 phy_stop(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
1645 static void tg3_phy_fini(struct tg3
*tp
)
1647 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) {
1648 phy_disconnect(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
1649 tp
->phy_flags
&= ~TG3_PHYFLG_IS_CONNECTED
;
1653 static void tg3_phy_fet_toggle_apd(struct tg3
*tp
, bool enable
)
1657 if (!tg3_readphy(tp
, MII_TG3_FET_TEST
, &phytest
)) {
1660 tg3_writephy(tp
, MII_TG3_FET_TEST
,
1661 phytest
| MII_TG3_FET_SHADOW_EN
);
1662 if (!tg3_readphy(tp
, MII_TG3_FET_SHDW_AUXSTAT2
, &phy
)) {
1664 phy
|= MII_TG3_FET_SHDW_AUXSTAT2_APD
;
1666 phy
&= ~MII_TG3_FET_SHDW_AUXSTAT2_APD
;
1667 tg3_writephy(tp
, MII_TG3_FET_SHDW_AUXSTAT2
, phy
);
1669 tg3_writephy(tp
, MII_TG3_FET_TEST
, phytest
);
1673 static void tg3_phy_toggle_apd(struct tg3
*tp
, bool enable
)
1677 if (!tg3_flag(tp
, 5705_PLUS
) ||
1678 (tg3_flag(tp
, 5717_PLUS
) &&
1679 (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)))
1682 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
1683 tg3_phy_fet_toggle_apd(tp
, enable
);
1687 reg
= MII_TG3_MISC_SHDW_WREN
|
1688 MII_TG3_MISC_SHDW_SCR5_SEL
|
1689 MII_TG3_MISC_SHDW_SCR5_LPED
|
1690 MII_TG3_MISC_SHDW_SCR5_DLPTLM
|
1691 MII_TG3_MISC_SHDW_SCR5_SDTL
|
1692 MII_TG3_MISC_SHDW_SCR5_C125OE
;
1693 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5784
|| !enable
)
1694 reg
|= MII_TG3_MISC_SHDW_SCR5_DLLAPD
;
1696 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, reg
);
1699 reg
= MII_TG3_MISC_SHDW_WREN
|
1700 MII_TG3_MISC_SHDW_APD_SEL
|
1701 MII_TG3_MISC_SHDW_APD_WKTM_84MS
;
1703 reg
|= MII_TG3_MISC_SHDW_APD_ENABLE
;
1705 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, reg
);
1708 static void tg3_phy_toggle_automdix(struct tg3
*tp
, int enable
)
1712 if (!tg3_flag(tp
, 5705_PLUS
) ||
1713 (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
))
1716 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
1719 if (!tg3_readphy(tp
, MII_TG3_FET_TEST
, &ephy
)) {
1720 u32 reg
= MII_TG3_FET_SHDW_MISCCTRL
;
1722 tg3_writephy(tp
, MII_TG3_FET_TEST
,
1723 ephy
| MII_TG3_FET_SHADOW_EN
);
1724 if (!tg3_readphy(tp
, reg
, &phy
)) {
1726 phy
|= MII_TG3_FET_SHDW_MISCCTRL_MDIX
;
1728 phy
&= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX
;
1729 tg3_writephy(tp
, reg
, phy
);
1731 tg3_writephy(tp
, MII_TG3_FET_TEST
, ephy
);
1736 ret
= tg3_phy_auxctl_read(tp
,
1737 MII_TG3_AUXCTL_SHDWSEL_MISC
, &phy
);
1740 phy
|= MII_TG3_AUXCTL_MISC_FORCE_AMDIX
;
1742 phy
&= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX
;
1743 tg3_phy_auxctl_write(tp
,
1744 MII_TG3_AUXCTL_SHDWSEL_MISC
, phy
);
1749 static void tg3_phy_set_wirespeed(struct tg3
*tp
)
1754 if (tp
->phy_flags
& TG3_PHYFLG_NO_ETH_WIRE_SPEED
)
1757 ret
= tg3_phy_auxctl_read(tp
, MII_TG3_AUXCTL_SHDWSEL_MISC
, &val
);
1759 tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_MISC
,
1760 val
| MII_TG3_AUXCTL_MISC_WIRESPD_EN
);
1763 static void tg3_phy_apply_otp(struct tg3
*tp
)
1772 if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp
))
1775 phy
= ((otp
& TG3_OTP_AGCTGT_MASK
) >> TG3_OTP_AGCTGT_SHIFT
);
1776 phy
|= MII_TG3_DSP_TAP1_AGCTGT_DFLT
;
1777 tg3_phydsp_write(tp
, MII_TG3_DSP_TAP1
, phy
);
1779 phy
= ((otp
& TG3_OTP_HPFFLTR_MASK
) >> TG3_OTP_HPFFLTR_SHIFT
) |
1780 ((otp
& TG3_OTP_HPFOVER_MASK
) >> TG3_OTP_HPFOVER_SHIFT
);
1781 tg3_phydsp_write(tp
, MII_TG3_DSP_AADJ1CH0
, phy
);
1783 phy
= ((otp
& TG3_OTP_LPFDIS_MASK
) >> TG3_OTP_LPFDIS_SHIFT
);
1784 phy
|= MII_TG3_DSP_AADJ1CH3_ADCCKADJ
;
1785 tg3_phydsp_write(tp
, MII_TG3_DSP_AADJ1CH3
, phy
);
1787 phy
= ((otp
& TG3_OTP_VDAC_MASK
) >> TG3_OTP_VDAC_SHIFT
);
1788 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP75
, phy
);
1790 phy
= ((otp
& TG3_OTP_10BTAMP_MASK
) >> TG3_OTP_10BTAMP_SHIFT
);
1791 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP96
, phy
);
1793 phy
= ((otp
& TG3_OTP_ROFF_MASK
) >> TG3_OTP_ROFF_SHIFT
) |
1794 ((otp
& TG3_OTP_RCOFF_MASK
) >> TG3_OTP_RCOFF_SHIFT
);
1795 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP97
, phy
);
1797 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp
);
1800 static void tg3_phy_eee_adjust(struct tg3
*tp
, u32 current_link_up
)
1804 if (!(tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
))
1809 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
&&
1810 current_link_up
== 1 &&
1811 tp
->link_config
.active_duplex
== DUPLEX_FULL
&&
1812 (tp
->link_config
.active_speed
== SPEED_100
||
1813 tp
->link_config
.active_speed
== SPEED_1000
)) {
1816 if (tp
->link_config
.active_speed
== SPEED_1000
)
1817 eeectl
= TG3_CPMU_EEE_CTRL_EXIT_16_5_US
;
1819 eeectl
= TG3_CPMU_EEE_CTRL_EXIT_36_US
;
1821 tw32(TG3_CPMU_EEE_CTRL
, eeectl
);
1823 tg3_phy_cl45_read(tp
, MDIO_MMD_AN
,
1824 TG3_CL45_D7_EEERES_STAT
, &val
);
1826 if (val
== TG3_CL45_D7_EEERES_STAT_LP_1000T
||
1827 val
== TG3_CL45_D7_EEERES_STAT_LP_100TX
)
1831 if (!tp
->setlpicnt
) {
1832 val
= tr32(TG3_CPMU_EEE_MODE
);
1833 tw32(TG3_CPMU_EEE_MODE
, val
& ~TG3_CPMU_EEEMD_LPI_ENABLE
);
1837 static void tg3_phy_eee_enable(struct tg3
*tp
)
1841 if (tp
->link_config
.active_speed
== SPEED_1000
&&
1842 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
||
1843 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
||
1844 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
) &&
1845 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp
)) {
1846 tg3_phydsp_write(tp
, MII_TG3_DSP_TAP26
, 0x0003);
1847 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp
);
1850 val
= tr32(TG3_CPMU_EEE_MODE
);
1851 tw32(TG3_CPMU_EEE_MODE
, val
| TG3_CPMU_EEEMD_LPI_ENABLE
);
1854 static int tg3_wait_macro_done(struct tg3
*tp
)
1861 if (!tg3_readphy(tp
, MII_TG3_DSP_CONTROL
, &tmp32
)) {
1862 if ((tmp32
& 0x1000) == 0)
1872 static int tg3_phy_write_and_check_testpat(struct tg3
*tp
, int *resetp
)
1874 static const u32 test_pat
[4][6] = {
1875 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1876 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1877 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1878 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1882 for (chan
= 0; chan
< 4; chan
++) {
1885 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
1886 (chan
* 0x2000) | 0x0200);
1887 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0002);
1889 for (i
= 0; i
< 6; i
++)
1890 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
,
1893 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0202);
1894 if (tg3_wait_macro_done(tp
)) {
1899 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
1900 (chan
* 0x2000) | 0x0200);
1901 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0082);
1902 if (tg3_wait_macro_done(tp
)) {
1907 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0802);
1908 if (tg3_wait_macro_done(tp
)) {
1913 for (i
= 0; i
< 6; i
+= 2) {
1916 if (tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &low
) ||
1917 tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &high
) ||
1918 tg3_wait_macro_done(tp
)) {
1924 if (low
!= test_pat
[chan
][i
] ||
1925 high
!= test_pat
[chan
][i
+1]) {
1926 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x000b);
1927 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x4001);
1928 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x4005);
1938 static int tg3_phy_reset_chanpat(struct tg3
*tp
)
1942 for (chan
= 0; chan
< 4; chan
++) {
1945 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
1946 (chan
* 0x2000) | 0x0200);
1947 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0002);
1948 for (i
= 0; i
< 6; i
++)
1949 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x000);
1950 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0202);
1951 if (tg3_wait_macro_done(tp
))
1958 static int tg3_phy_reset_5703_4_5(struct tg3
*tp
)
1960 u32 reg32
, phy9_orig
;
1961 int retries
, do_phy_reset
, err
;
1967 err
= tg3_bmcr_reset(tp
);
1973 /* Disable transmitter and interrupt. */
1974 if (tg3_readphy(tp
, MII_TG3_EXT_CTRL
, ®32
))
1978 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, reg32
);
1980 /* Set full-duplex, 1000 mbps. */
1981 tg3_writephy(tp
, MII_BMCR
,
1982 BMCR_FULLDPLX
| TG3_BMCR_SPEED1000
);
1984 /* Set to master mode. */
1985 if (tg3_readphy(tp
, MII_TG3_CTRL
, &phy9_orig
))
1988 tg3_writephy(tp
, MII_TG3_CTRL
,
1989 (MII_TG3_CTRL_AS_MASTER
|
1990 MII_TG3_CTRL_ENABLE_AS_MASTER
));
1992 err
= TG3_PHY_AUXCTL_SMDSP_ENABLE(tp
);
1996 /* Block the PHY control access. */
1997 tg3_phydsp_write(tp
, 0x8005, 0x0800);
1999 err
= tg3_phy_write_and_check_testpat(tp
, &do_phy_reset
);
2002 } while (--retries
);
2004 err
= tg3_phy_reset_chanpat(tp
);
2008 tg3_phydsp_write(tp
, 0x8005, 0x0000);
2010 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x8200);
2011 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0000);
2013 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp
);
2015 tg3_writephy(tp
, MII_TG3_CTRL
, phy9_orig
);
2017 if (!tg3_readphy(tp
, MII_TG3_EXT_CTRL
, ®32
)) {
2019 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, reg32
);
2026 /* This will reset the tigon3 PHY if there is no valid
2027 * link unless the FORCE argument is non-zero.
2029 static int tg3_phy_reset(struct tg3
*tp
)
2034 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
2035 val
= tr32(GRC_MISC_CFG
);
2036 tw32_f(GRC_MISC_CFG
, val
& ~GRC_MISC_CFG_EPHY_IDDQ
);
2039 err
= tg3_readphy(tp
, MII_BMSR
, &val
);
2040 err
|= tg3_readphy(tp
, MII_BMSR
, &val
);
2044 if (netif_running(tp
->dev
) && netif_carrier_ok(tp
->dev
)) {
2045 netif_carrier_off(tp
->dev
);
2046 tg3_link_report(tp
);
2049 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
2050 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
||
2051 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
2052 err
= tg3_phy_reset_5703_4_5(tp
);
2059 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
&&
2060 GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5784_AX
) {
2061 cpmuctrl
= tr32(TG3_CPMU_CTRL
);
2062 if (cpmuctrl
& CPMU_CTRL_GPHY_10MB_RXONLY
)
2064 cpmuctrl
& ~CPMU_CTRL_GPHY_10MB_RXONLY
);
2067 err
= tg3_bmcr_reset(tp
);
2071 if (cpmuctrl
& CPMU_CTRL_GPHY_10MB_RXONLY
) {
2072 val
= MII_TG3_DSP_EXP8_AEDW
| MII_TG3_DSP_EXP8_REJ2MHz
;
2073 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP8
, val
);
2075 tw32(TG3_CPMU_CTRL
, cpmuctrl
);
2078 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5784_AX
||
2079 GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5761_AX
) {
2080 val
= tr32(TG3_CPMU_LSPD_1000MB_CLK
);
2081 if ((val
& CPMU_LSPD_1000MB_MACCLK_MASK
) ==
2082 CPMU_LSPD_1000MB_MACCLK_12_5
) {
2083 val
&= ~CPMU_LSPD_1000MB_MACCLK_MASK
;
2085 tw32_f(TG3_CPMU_LSPD_1000MB_CLK
, val
);
2089 if (tg3_flag(tp
, 5717_PLUS
) &&
2090 (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
))
2093 tg3_phy_apply_otp(tp
);
2095 if (tp
->phy_flags
& TG3_PHYFLG_ENABLE_APD
)
2096 tg3_phy_toggle_apd(tp
, true);
2098 tg3_phy_toggle_apd(tp
, false);
2101 if ((tp
->phy_flags
& TG3_PHYFLG_ADC_BUG
) &&
2102 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp
)) {
2103 tg3_phydsp_write(tp
, 0x201f, 0x2aaa);
2104 tg3_phydsp_write(tp
, 0x000a, 0x0323);
2105 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp
);
2108 if (tp
->phy_flags
& TG3_PHYFLG_5704_A0_BUG
) {
2109 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8d68);
2110 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8d68);
2113 if (tp
->phy_flags
& TG3_PHYFLG_BER_BUG
) {
2114 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp
)) {
2115 tg3_phydsp_write(tp
, 0x000a, 0x310b);
2116 tg3_phydsp_write(tp
, 0x201f, 0x9506);
2117 tg3_phydsp_write(tp
, 0x401f, 0x14e2);
2118 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp
);
2120 } else if (tp
->phy_flags
& TG3_PHYFLG_JITTER_BUG
) {
2121 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp
)) {
2122 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x000a);
2123 if (tp
->phy_flags
& TG3_PHYFLG_ADJUST_TRIM
) {
2124 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x110b);
2125 tg3_writephy(tp
, MII_TG3_TEST1
,
2126 MII_TG3_TEST1_TRIM_EN
| 0x4);
2128 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x010b);
2130 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp
);
2134 /* Set Extended packet length bit (bit 14) on all chips that */
2135 /* support jumbo frames */
2136 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5401
) {
2137 /* Cannot do read-modify-write on 5401 */
2138 tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, 0x4c20);
2139 } else if (tg3_flag(tp
, JUMBO_CAPABLE
)) {
2140 /* Set bit 14 with read-modify-write to preserve other bits */
2141 err
= tg3_phy_auxctl_read(tp
,
2142 MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, &val
);
2144 tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_AUXCTL
,
2145 val
| MII_TG3_AUXCTL_ACTL_EXTPKTLEN
);
2148 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2149 * jumbo frames transmission.
2151 if (tg3_flag(tp
, JUMBO_CAPABLE
)) {
2152 if (!tg3_readphy(tp
, MII_TG3_EXT_CTRL
, &val
))
2153 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
2154 val
| MII_TG3_EXT_CTRL_FIFO_ELASTIC
);
2157 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
2158 /* adjust output voltage */
2159 tg3_writephy(tp
, MII_TG3_FET_PTEST
, 0x12);
2162 tg3_phy_toggle_automdix(tp
, 1);
2163 tg3_phy_set_wirespeed(tp
);
2167 static void tg3_frob_aux_power(struct tg3
*tp
)
2169 bool need_vaux
= false;
2171 /* The GPIOs do something completely different on 57765. */
2172 if (!tg3_flag(tp
, IS_NIC
) ||
2173 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
||
2174 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
)
2177 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
||
2178 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
||
2179 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
||
2180 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
) &&
2181 tp
->pdev_peer
!= tp
->pdev
) {
2182 struct net_device
*dev_peer
;
2184 dev_peer
= pci_get_drvdata(tp
->pdev_peer
);
2186 /* remove_one() may have been run on the peer. */
2188 struct tg3
*tp_peer
= netdev_priv(dev_peer
);
2190 if (tg3_flag(tp_peer
, INIT_COMPLETE
))
2193 if (tg3_flag(tp_peer
, WOL_ENABLE
) ||
2194 tg3_flag(tp_peer
, ENABLE_ASF
))
2199 if (tg3_flag(tp
, WOL_ENABLE
) || tg3_flag(tp
, ENABLE_ASF
))
2203 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
2204 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
2205 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
2206 (GRC_LCLCTRL_GPIO_OE0
|
2207 GRC_LCLCTRL_GPIO_OE1
|
2208 GRC_LCLCTRL_GPIO_OE2
|
2209 GRC_LCLCTRL_GPIO_OUTPUT0
|
2210 GRC_LCLCTRL_GPIO_OUTPUT1
),
2212 } else if (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5761
||
2213 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5761S
) {
2214 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2215 u32 grc_local_ctrl
= GRC_LCLCTRL_GPIO_OE0
|
2216 GRC_LCLCTRL_GPIO_OE1
|
2217 GRC_LCLCTRL_GPIO_OE2
|
2218 GRC_LCLCTRL_GPIO_OUTPUT0
|
2219 GRC_LCLCTRL_GPIO_OUTPUT1
|
2221 tw32_wait_f(GRC_LOCAL_CTRL
, grc_local_ctrl
, 100);
2223 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OUTPUT2
;
2224 tw32_wait_f(GRC_LOCAL_CTRL
, grc_local_ctrl
, 100);
2226 grc_local_ctrl
&= ~GRC_LCLCTRL_GPIO_OUTPUT0
;
2227 tw32_wait_f(GRC_LOCAL_CTRL
, grc_local_ctrl
, 100);
2230 u32 grc_local_ctrl
= 0;
2232 /* Workaround to prevent overdrawing Amps. */
2233 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
2235 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE3
;
2236 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
2237 grc_local_ctrl
, 100);
2240 /* On 5753 and variants, GPIO2 cannot be used. */
2241 no_gpio2
= tp
->nic_sram_data_cfg
&
2242 NIC_SRAM_DATA_CFG_NO_GPIO2
;
2244 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE0
|
2245 GRC_LCLCTRL_GPIO_OE1
|
2246 GRC_LCLCTRL_GPIO_OE2
|
2247 GRC_LCLCTRL_GPIO_OUTPUT1
|
2248 GRC_LCLCTRL_GPIO_OUTPUT2
;
2250 grc_local_ctrl
&= ~(GRC_LCLCTRL_GPIO_OE2
|
2251 GRC_LCLCTRL_GPIO_OUTPUT2
);
2253 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
2254 grc_local_ctrl
, 100);
2256 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OUTPUT0
;
2258 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
2259 grc_local_ctrl
, 100);
2262 grc_local_ctrl
&= ~GRC_LCLCTRL_GPIO_OUTPUT2
;
2263 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
2264 grc_local_ctrl
, 100);
2268 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
2269 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
) {
2270 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
2271 (GRC_LCLCTRL_GPIO_OE1
|
2272 GRC_LCLCTRL_GPIO_OUTPUT1
), 100);
2274 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
2275 GRC_LCLCTRL_GPIO_OE1
, 100);
2277 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
2278 (GRC_LCLCTRL_GPIO_OE1
|
2279 GRC_LCLCTRL_GPIO_OUTPUT1
), 100);
2284 static int tg3_5700_link_polarity(struct tg3
*tp
, u32 speed
)
2286 if (tp
->led_ctrl
== LED_CTRL_MODE_PHY_2
)
2288 else if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5411
) {
2289 if (speed
!= SPEED_10
)
2291 } else if (speed
== SPEED_10
)
2297 static int tg3_setup_phy(struct tg3
*, int);
2299 #define RESET_KIND_SHUTDOWN 0
2300 #define RESET_KIND_INIT 1
2301 #define RESET_KIND_SUSPEND 2
2303 static void tg3_write_sig_post_reset(struct tg3
*, int);
2304 static int tg3_halt_cpu(struct tg3
*, u32
);
2306 static void tg3_power_down_phy(struct tg3
*tp
, bool do_low_power
)
2310 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
2311 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
2312 u32 sg_dig_ctrl
= tr32(SG_DIG_CTRL
);
2313 u32 serdes_cfg
= tr32(MAC_SERDES_CFG
);
2316 SG_DIG_USING_HW_AUTONEG
| SG_DIG_SOFT_RESET
;
2317 tw32(SG_DIG_CTRL
, sg_dig_ctrl
);
2318 tw32(MAC_SERDES_CFG
, serdes_cfg
| (1 << 15));
2323 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
2325 val
= tr32(GRC_MISC_CFG
);
2326 tw32_f(GRC_MISC_CFG
, val
| GRC_MISC_CFG_EPHY_IDDQ
);
2329 } else if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
2331 if (!tg3_readphy(tp
, MII_TG3_FET_TEST
, &phytest
)) {
2334 tg3_writephy(tp
, MII_ADVERTISE
, 0);
2335 tg3_writephy(tp
, MII_BMCR
,
2336 BMCR_ANENABLE
| BMCR_ANRESTART
);
2338 tg3_writephy(tp
, MII_TG3_FET_TEST
,
2339 phytest
| MII_TG3_FET_SHADOW_EN
);
2340 if (!tg3_readphy(tp
, MII_TG3_FET_SHDW_AUXMODE4
, &phy
)) {
2341 phy
|= MII_TG3_FET_SHDW_AUXMODE4_SBPD
;
2343 MII_TG3_FET_SHDW_AUXMODE4
,
2346 tg3_writephy(tp
, MII_TG3_FET_TEST
, phytest
);
2349 } else if (do_low_power
) {
2350 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
2351 MII_TG3_EXT_CTRL_FORCE_LED_OFF
);
2353 val
= MII_TG3_AUXCTL_PCTL_100TX_LPWR
|
2354 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE
|
2355 MII_TG3_AUXCTL_PCTL_VREG_11V
;
2356 tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_PWRCTL
, val
);
2359 /* The PHY should not be powered down on some chips because
2362 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
2363 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
||
2364 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5780
&&
2365 (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)))
2368 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5784_AX
||
2369 GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5761_AX
) {
2370 val
= tr32(TG3_CPMU_LSPD_1000MB_CLK
);
2371 val
&= ~CPMU_LSPD_1000MB_MACCLK_MASK
;
2372 val
|= CPMU_LSPD_1000MB_MACCLK_12_5
;
2373 tw32_f(TG3_CPMU_LSPD_1000MB_CLK
, val
);
2376 tg3_writephy(tp
, MII_BMCR
, BMCR_PDOWN
);
2379 /* tp->lock is held. */
2380 static int tg3_nvram_lock(struct tg3
*tp
)
2382 if (tg3_flag(tp
, NVRAM
)) {
2385 if (tp
->nvram_lock_cnt
== 0) {
2386 tw32(NVRAM_SWARB
, SWARB_REQ_SET1
);
2387 for (i
= 0; i
< 8000; i
++) {
2388 if (tr32(NVRAM_SWARB
) & SWARB_GNT1
)
2393 tw32(NVRAM_SWARB
, SWARB_REQ_CLR1
);
2397 tp
->nvram_lock_cnt
++;
2402 /* tp->lock is held. */
2403 static void tg3_nvram_unlock(struct tg3
*tp
)
2405 if (tg3_flag(tp
, NVRAM
)) {
2406 if (tp
->nvram_lock_cnt
> 0)
2407 tp
->nvram_lock_cnt
--;
2408 if (tp
->nvram_lock_cnt
== 0)
2409 tw32_f(NVRAM_SWARB
, SWARB_REQ_CLR1
);
2413 /* tp->lock is held. */
2414 static void tg3_enable_nvram_access(struct tg3
*tp
)
2416 if (tg3_flag(tp
, 5750_PLUS
) && !tg3_flag(tp
, PROTECTED_NVRAM
)) {
2417 u32 nvaccess
= tr32(NVRAM_ACCESS
);
2419 tw32(NVRAM_ACCESS
, nvaccess
| ACCESS_ENABLE
);
2423 /* tp->lock is held. */
2424 static void tg3_disable_nvram_access(struct tg3
*tp
)
2426 if (tg3_flag(tp
, 5750_PLUS
) && !tg3_flag(tp
, PROTECTED_NVRAM
)) {
2427 u32 nvaccess
= tr32(NVRAM_ACCESS
);
2429 tw32(NVRAM_ACCESS
, nvaccess
& ~ACCESS_ENABLE
);
2433 static int tg3_nvram_read_using_eeprom(struct tg3
*tp
,
2434 u32 offset
, u32
*val
)
2439 if (offset
> EEPROM_ADDR_ADDR_MASK
|| (offset
% 4) != 0)
2442 tmp
= tr32(GRC_EEPROM_ADDR
) & ~(EEPROM_ADDR_ADDR_MASK
|
2443 EEPROM_ADDR_DEVID_MASK
|
2445 tw32(GRC_EEPROM_ADDR
,
2447 (0 << EEPROM_ADDR_DEVID_SHIFT
) |
2448 ((offset
<< EEPROM_ADDR_ADDR_SHIFT
) &
2449 EEPROM_ADDR_ADDR_MASK
) |
2450 EEPROM_ADDR_READ
| EEPROM_ADDR_START
);
2452 for (i
= 0; i
< 1000; i
++) {
2453 tmp
= tr32(GRC_EEPROM_ADDR
);
2455 if (tmp
& EEPROM_ADDR_COMPLETE
)
2459 if (!(tmp
& EEPROM_ADDR_COMPLETE
))
2462 tmp
= tr32(GRC_EEPROM_DATA
);
2465 * The data will always be opposite the native endian
2466 * format. Perform a blind byteswap to compensate.
2473 #define NVRAM_CMD_TIMEOUT 10000
2475 static int tg3_nvram_exec_cmd(struct tg3
*tp
, u32 nvram_cmd
)
2479 tw32(NVRAM_CMD
, nvram_cmd
);
2480 for (i
= 0; i
< NVRAM_CMD_TIMEOUT
; i
++) {
2482 if (tr32(NVRAM_CMD
) & NVRAM_CMD_DONE
) {
2488 if (i
== NVRAM_CMD_TIMEOUT
)
2494 static u32
tg3_nvram_phys_addr(struct tg3
*tp
, u32 addr
)
2496 if (tg3_flag(tp
, NVRAM
) &&
2497 tg3_flag(tp
, NVRAM_BUFFERED
) &&
2498 tg3_flag(tp
, FLASH
) &&
2499 !tg3_flag(tp
, NO_NVRAM_ADDR_TRANS
) &&
2500 (tp
->nvram_jedecnum
== JEDEC_ATMEL
))
2502 addr
= ((addr
/ tp
->nvram_pagesize
) <<
2503 ATMEL_AT45DB0X1B_PAGE_POS
) +
2504 (addr
% tp
->nvram_pagesize
);
2509 static u32
tg3_nvram_logical_addr(struct tg3
*tp
, u32 addr
)
2511 if (tg3_flag(tp
, NVRAM
) &&
2512 tg3_flag(tp
, NVRAM_BUFFERED
) &&
2513 tg3_flag(tp
, FLASH
) &&
2514 !tg3_flag(tp
, NO_NVRAM_ADDR_TRANS
) &&
2515 (tp
->nvram_jedecnum
== JEDEC_ATMEL
))
2517 addr
= ((addr
>> ATMEL_AT45DB0X1B_PAGE_POS
) *
2518 tp
->nvram_pagesize
) +
2519 (addr
& ((1 << ATMEL_AT45DB0X1B_PAGE_POS
) - 1));
2524 /* NOTE: Data read in from NVRAM is byteswapped according to
2525 * the byteswapping settings for all other register accesses.
2526 * tg3 devices are BE devices, so on a BE machine, the data
2527 * returned will be exactly as it is seen in NVRAM. On a LE
2528 * machine, the 32-bit value will be byteswapped.
2530 static int tg3_nvram_read(struct tg3
*tp
, u32 offset
, u32
*val
)
2534 if (!tg3_flag(tp
, NVRAM
))
2535 return tg3_nvram_read_using_eeprom(tp
, offset
, val
);
2537 offset
= tg3_nvram_phys_addr(tp
, offset
);
2539 if (offset
> NVRAM_ADDR_MSK
)
2542 ret
= tg3_nvram_lock(tp
);
2546 tg3_enable_nvram_access(tp
);
2548 tw32(NVRAM_ADDR
, offset
);
2549 ret
= tg3_nvram_exec_cmd(tp
, NVRAM_CMD_RD
| NVRAM_CMD_GO
|
2550 NVRAM_CMD_FIRST
| NVRAM_CMD_LAST
| NVRAM_CMD_DONE
);
2553 *val
= tr32(NVRAM_RDDATA
);
2555 tg3_disable_nvram_access(tp
);
2557 tg3_nvram_unlock(tp
);
2562 /* Ensures NVRAM data is in bytestream format. */
2563 static int tg3_nvram_read_be32(struct tg3
*tp
, u32 offset
, __be32
*val
)
2566 int res
= tg3_nvram_read(tp
, offset
, &v
);
2568 *val
= cpu_to_be32(v
);
2572 /* tp->lock is held. */
2573 static void __tg3_set_mac_addr(struct tg3
*tp
, int skip_mac_1
)
2575 u32 addr_high
, addr_low
;
2578 addr_high
= ((tp
->dev
->dev_addr
[0] << 8) |
2579 tp
->dev
->dev_addr
[1]);
2580 addr_low
= ((tp
->dev
->dev_addr
[2] << 24) |
2581 (tp
->dev
->dev_addr
[3] << 16) |
2582 (tp
->dev
->dev_addr
[4] << 8) |
2583 (tp
->dev
->dev_addr
[5] << 0));
2584 for (i
= 0; i
< 4; i
++) {
2585 if (i
== 1 && skip_mac_1
)
2587 tw32(MAC_ADDR_0_HIGH
+ (i
* 8), addr_high
);
2588 tw32(MAC_ADDR_0_LOW
+ (i
* 8), addr_low
);
2591 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
2592 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
2593 for (i
= 0; i
< 12; i
++) {
2594 tw32(MAC_EXTADDR_0_HIGH
+ (i
* 8), addr_high
);
2595 tw32(MAC_EXTADDR_0_LOW
+ (i
* 8), addr_low
);
2599 addr_high
= (tp
->dev
->dev_addr
[0] +
2600 tp
->dev
->dev_addr
[1] +
2601 tp
->dev
->dev_addr
[2] +
2602 tp
->dev
->dev_addr
[3] +
2603 tp
->dev
->dev_addr
[4] +
2604 tp
->dev
->dev_addr
[5]) &
2605 TX_BACKOFF_SEED_MASK
;
2606 tw32(MAC_TX_BACKOFF_SEED
, addr_high
);
2609 static void tg3_enable_register_access(struct tg3
*tp
)
2612 * Make sure register accesses (indirect or otherwise) will function
2615 pci_write_config_dword(tp
->pdev
,
2616 TG3PCI_MISC_HOST_CTRL
, tp
->misc_host_ctrl
);
2619 static int tg3_power_up(struct tg3
*tp
)
2621 tg3_enable_register_access(tp
);
2623 pci_set_power_state(tp
->pdev
, PCI_D0
);
2625 /* Switch out of Vaux if it is a NIC */
2626 if (tg3_flag(tp
, IS_NIC
))
2627 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
, 100);
2632 static int tg3_power_down_prepare(struct tg3
*tp
)
2635 bool device_should_wake
, do_low_power
;
2637 tg3_enable_register_access(tp
);
2639 /* Restore the CLKREQ setting. */
2640 if (tg3_flag(tp
, CLKREQ_BUG
)) {
2643 pci_read_config_word(tp
->pdev
,
2644 tp
->pcie_cap
+ PCI_EXP_LNKCTL
,
2646 lnkctl
|= PCI_EXP_LNKCTL_CLKREQ_EN
;
2647 pci_write_config_word(tp
->pdev
,
2648 tp
->pcie_cap
+ PCI_EXP_LNKCTL
,
2652 misc_host_ctrl
= tr32(TG3PCI_MISC_HOST_CTRL
);
2653 tw32(TG3PCI_MISC_HOST_CTRL
,
2654 misc_host_ctrl
| MISC_HOST_CTRL_MASK_PCI_INT
);
2656 device_should_wake
= device_may_wakeup(&tp
->pdev
->dev
) &&
2657 tg3_flag(tp
, WOL_ENABLE
);
2659 if (tg3_flag(tp
, USE_PHYLIB
)) {
2660 do_low_power
= false;
2661 if ((tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) &&
2662 !(tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)) {
2663 struct phy_device
*phydev
;
2664 u32 phyid
, advertising
;
2666 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
2668 tp
->phy_flags
|= TG3_PHYFLG_IS_LOW_POWER
;
2670 tp
->link_config
.orig_speed
= phydev
->speed
;
2671 tp
->link_config
.orig_duplex
= phydev
->duplex
;
2672 tp
->link_config
.orig_autoneg
= phydev
->autoneg
;
2673 tp
->link_config
.orig_advertising
= phydev
->advertising
;
2675 advertising
= ADVERTISED_TP
|
2677 ADVERTISED_Autoneg
|
2678 ADVERTISED_10baseT_Half
;
2680 if (tg3_flag(tp
, ENABLE_ASF
) || device_should_wake
) {
2681 if (tg3_flag(tp
, WOL_SPEED_100MB
))
2683 ADVERTISED_100baseT_Half
|
2684 ADVERTISED_100baseT_Full
|
2685 ADVERTISED_10baseT_Full
;
2687 advertising
|= ADVERTISED_10baseT_Full
;
2690 phydev
->advertising
= advertising
;
2692 phy_start_aneg(phydev
);
2694 phyid
= phydev
->drv
->phy_id
& phydev
->drv
->phy_id_mask
;
2695 if (phyid
!= PHY_ID_BCMAC131
) {
2696 phyid
&= PHY_BCM_OUI_MASK
;
2697 if (phyid
== PHY_BCM_OUI_1
||
2698 phyid
== PHY_BCM_OUI_2
||
2699 phyid
== PHY_BCM_OUI_3
)
2700 do_low_power
= true;
2704 do_low_power
= true;
2706 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)) {
2707 tp
->phy_flags
|= TG3_PHYFLG_IS_LOW_POWER
;
2708 tp
->link_config
.orig_speed
= tp
->link_config
.speed
;
2709 tp
->link_config
.orig_duplex
= tp
->link_config
.duplex
;
2710 tp
->link_config
.orig_autoneg
= tp
->link_config
.autoneg
;
2713 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)) {
2714 tp
->link_config
.speed
= SPEED_10
;
2715 tp
->link_config
.duplex
= DUPLEX_HALF
;
2716 tp
->link_config
.autoneg
= AUTONEG_ENABLE
;
2717 tg3_setup_phy(tp
, 0);
2721 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
2724 val
= tr32(GRC_VCPU_EXT_CTRL
);
2725 tw32(GRC_VCPU_EXT_CTRL
, val
| GRC_VCPU_EXT_CTRL_DISABLE_WOL
);
2726 } else if (!tg3_flag(tp
, ENABLE_ASF
)) {
2730 for (i
= 0; i
< 200; i
++) {
2731 tg3_read_mem(tp
, NIC_SRAM_FW_ASF_STATUS_MBOX
, &val
);
2732 if (val
== ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1
)
2737 if (tg3_flag(tp
, WOL_CAP
))
2738 tg3_write_mem(tp
, NIC_SRAM_WOL_MBOX
, WOL_SIGNATURE
|
2739 WOL_DRV_STATE_SHUTDOWN
|
2743 if (device_should_wake
) {
2746 if (!(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)) {
2748 !(tp
->phy_flags
& TG3_PHYFLG_IS_FET
)) {
2749 tg3_phy_auxctl_write(tp
,
2750 MII_TG3_AUXCTL_SHDWSEL_PWRCTL
,
2751 MII_TG3_AUXCTL_PCTL_WOL_EN
|
2752 MII_TG3_AUXCTL_PCTL_100TX_LPWR
|
2753 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC
);
2757 if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)
2758 mac_mode
= MAC_MODE_PORT_MODE_GMII
;
2760 mac_mode
= MAC_MODE_PORT_MODE_MII
;
2762 mac_mode
|= tp
->mac_mode
& MAC_MODE_LINK_POLARITY
;
2763 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
2765 u32 speed
= tg3_flag(tp
, WOL_SPEED_100MB
) ?
2766 SPEED_100
: SPEED_10
;
2767 if (tg3_5700_link_polarity(tp
, speed
))
2768 mac_mode
|= MAC_MODE_LINK_POLARITY
;
2770 mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
2773 mac_mode
= MAC_MODE_PORT_MODE_TBI
;
2776 if (!tg3_flag(tp
, 5750_PLUS
))
2777 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
2779 mac_mode
|= MAC_MODE_MAGIC_PKT_ENABLE
;
2780 if ((tg3_flag(tp
, 5705_PLUS
) && !tg3_flag(tp
, 5780_CLASS
)) &&
2781 (tg3_flag(tp
, ENABLE_ASF
) || tg3_flag(tp
, ENABLE_APE
)))
2782 mac_mode
|= MAC_MODE_KEEP_FRAME_IN_WOL
;
2784 if (tg3_flag(tp
, ENABLE_APE
))
2785 mac_mode
|= MAC_MODE_APE_TX_EN
|
2786 MAC_MODE_APE_RX_EN
|
2787 MAC_MODE_TDE_ENABLE
;
2789 tw32_f(MAC_MODE
, mac_mode
);
2792 tw32_f(MAC_RX_MODE
, RX_MODE_ENABLE
);
2796 if (!tg3_flag(tp
, WOL_SPEED_100MB
) &&
2797 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
2798 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
)) {
2801 base_val
= tp
->pci_clock_ctrl
;
2802 base_val
|= (CLOCK_CTRL_RXCLK_DISABLE
|
2803 CLOCK_CTRL_TXCLK_DISABLE
);
2805 tw32_wait_f(TG3PCI_CLOCK_CTRL
, base_val
| CLOCK_CTRL_ALTCLK
|
2806 CLOCK_CTRL_PWRDOWN_PLL133
, 40);
2807 } else if (tg3_flag(tp
, 5780_CLASS
) ||
2808 tg3_flag(tp
, CPMU_PRESENT
) ||
2809 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
2811 } else if (!(tg3_flag(tp
, 5750_PLUS
) && tg3_flag(tp
, ENABLE_ASF
))) {
2812 u32 newbits1
, newbits2
;
2814 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
2815 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
2816 newbits1
= (CLOCK_CTRL_RXCLK_DISABLE
|
2817 CLOCK_CTRL_TXCLK_DISABLE
|
2819 newbits2
= newbits1
| CLOCK_CTRL_44MHZ_CORE
;
2820 } else if (tg3_flag(tp
, 5705_PLUS
)) {
2821 newbits1
= CLOCK_CTRL_625_CORE
;
2822 newbits2
= newbits1
| CLOCK_CTRL_ALTCLK
;
2824 newbits1
= CLOCK_CTRL_ALTCLK
;
2825 newbits2
= newbits1
| CLOCK_CTRL_44MHZ_CORE
;
2828 tw32_wait_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
| newbits1
,
2831 tw32_wait_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
| newbits2
,
2834 if (!tg3_flag(tp
, 5705_PLUS
)) {
2837 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
2838 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
2839 newbits3
= (CLOCK_CTRL_RXCLK_DISABLE
|
2840 CLOCK_CTRL_TXCLK_DISABLE
|
2841 CLOCK_CTRL_44MHZ_CORE
);
2843 newbits3
= CLOCK_CTRL_44MHZ_CORE
;
2846 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
2847 tp
->pci_clock_ctrl
| newbits3
, 40);
2851 if (!(device_should_wake
) && !tg3_flag(tp
, ENABLE_ASF
))
2852 tg3_power_down_phy(tp
, do_low_power
);
2854 tg3_frob_aux_power(tp
);
2856 /* Workaround for unstable PLL clock */
2857 if ((GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5750_AX
) ||
2858 (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5750_BX
)) {
2859 u32 val
= tr32(0x7d00);
2861 val
&= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2863 if (!tg3_flag(tp
, ENABLE_ASF
)) {
2866 err
= tg3_nvram_lock(tp
);
2867 tg3_halt_cpu(tp
, RX_CPU_BASE
);
2869 tg3_nvram_unlock(tp
);
2873 tg3_write_sig_post_reset(tp
, RESET_KIND_SHUTDOWN
);
2878 static void tg3_power_down(struct tg3
*tp
)
2880 tg3_power_down_prepare(tp
);
2882 pci_wake_from_d3(tp
->pdev
, tg3_flag(tp
, WOL_ENABLE
));
2883 pci_set_power_state(tp
->pdev
, PCI_D3hot
);
2886 static void tg3_aux_stat_to_speed_duplex(struct tg3
*tp
, u32 val
, u16
*speed
, u8
*duplex
)
2888 switch (val
& MII_TG3_AUX_STAT_SPDMASK
) {
2889 case MII_TG3_AUX_STAT_10HALF
:
2891 *duplex
= DUPLEX_HALF
;
2894 case MII_TG3_AUX_STAT_10FULL
:
2896 *duplex
= DUPLEX_FULL
;
2899 case MII_TG3_AUX_STAT_100HALF
:
2901 *duplex
= DUPLEX_HALF
;
2904 case MII_TG3_AUX_STAT_100FULL
:
2906 *duplex
= DUPLEX_FULL
;
2909 case MII_TG3_AUX_STAT_1000HALF
:
2910 *speed
= SPEED_1000
;
2911 *duplex
= DUPLEX_HALF
;
2914 case MII_TG3_AUX_STAT_1000FULL
:
2915 *speed
= SPEED_1000
;
2916 *duplex
= DUPLEX_FULL
;
2920 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
2921 *speed
= (val
& MII_TG3_AUX_STAT_100
) ? SPEED_100
:
2923 *duplex
= (val
& MII_TG3_AUX_STAT_FULL
) ? DUPLEX_FULL
:
2927 *speed
= SPEED_INVALID
;
2928 *duplex
= DUPLEX_INVALID
;
2933 static int tg3_phy_autoneg_cfg(struct tg3
*tp
, u32 advertise
, u32 flowctrl
)
2938 new_adv
= ADVERTISE_CSMA
;
2939 if (advertise
& ADVERTISED_10baseT_Half
)
2940 new_adv
|= ADVERTISE_10HALF
;
2941 if (advertise
& ADVERTISED_10baseT_Full
)
2942 new_adv
|= ADVERTISE_10FULL
;
2943 if (advertise
& ADVERTISED_100baseT_Half
)
2944 new_adv
|= ADVERTISE_100HALF
;
2945 if (advertise
& ADVERTISED_100baseT_Full
)
2946 new_adv
|= ADVERTISE_100FULL
;
2948 new_adv
|= tg3_advert_flowctrl_1000T(flowctrl
);
2950 err
= tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
2954 if (tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)
2958 if (advertise
& ADVERTISED_1000baseT_Half
)
2959 new_adv
|= MII_TG3_CTRL_ADV_1000_HALF
;
2960 if (advertise
& ADVERTISED_1000baseT_Full
)
2961 new_adv
|= MII_TG3_CTRL_ADV_1000_FULL
;
2963 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
2964 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
)
2965 new_adv
|= (MII_TG3_CTRL_AS_MASTER
|
2966 MII_TG3_CTRL_ENABLE_AS_MASTER
);
2968 err
= tg3_writephy(tp
, MII_TG3_CTRL
, new_adv
);
2972 if (!(tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
))
2975 tw32(TG3_CPMU_EEE_MODE
,
2976 tr32(TG3_CPMU_EEE_MODE
) & ~TG3_CPMU_EEEMD_LPI_ENABLE
);
2978 err
= TG3_PHY_AUXCTL_SMDSP_ENABLE(tp
);
2982 switch (GET_ASIC_REV(tp
->pci_chip_rev_id
)) {
2984 case ASIC_REV_57765
:
2985 if (!tg3_phydsp_read(tp
, MII_TG3_DSP_CH34TP2
, &val
))
2986 tg3_phydsp_write(tp
, MII_TG3_DSP_CH34TP2
, val
|
2987 MII_TG3_DSP_CH34TP2_HIBW01
);
2990 val
= MII_TG3_DSP_TAP26_ALNOKO
|
2991 MII_TG3_DSP_TAP26_RMRXSTO
|
2992 MII_TG3_DSP_TAP26_OPCSINPT
;
2993 tg3_phydsp_write(tp
, MII_TG3_DSP_TAP26
, val
);
2997 /* Advertise 100-BaseTX EEE ability */
2998 if (advertise
& ADVERTISED_100baseT_Full
)
2999 val
|= MDIO_AN_EEE_ADV_100TX
;
3000 /* Advertise 1000-BaseT EEE ability */
3001 if (advertise
& ADVERTISED_1000baseT_Full
)
3002 val
|= MDIO_AN_EEE_ADV_1000T
;
3003 err
= tg3_phy_cl45_write(tp
, MDIO_MMD_AN
, MDIO_AN_EEE_ADV
, val
);
3005 err2
= TG3_PHY_AUXCTL_SMDSP_DISABLE(tp
);
3014 static void tg3_phy_copper_begin(struct tg3
*tp
)
3019 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) {
3020 new_adv
= ADVERTISED_10baseT_Half
|
3021 ADVERTISED_10baseT_Full
;
3022 if (tg3_flag(tp
, WOL_SPEED_100MB
))
3023 new_adv
|= ADVERTISED_100baseT_Half
|
3024 ADVERTISED_100baseT_Full
;
3026 tg3_phy_autoneg_cfg(tp
, new_adv
,
3027 FLOW_CTRL_TX
| FLOW_CTRL_RX
);
3028 } else if (tp
->link_config
.speed
== SPEED_INVALID
) {
3029 if (tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)
3030 tp
->link_config
.advertising
&=
3031 ~(ADVERTISED_1000baseT_Half
|
3032 ADVERTISED_1000baseT_Full
);
3034 tg3_phy_autoneg_cfg(tp
, tp
->link_config
.advertising
,
3035 tp
->link_config
.flowctrl
);
3037 /* Asking for a specific link mode. */
3038 if (tp
->link_config
.speed
== SPEED_1000
) {
3039 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
3040 new_adv
= ADVERTISED_1000baseT_Full
;
3042 new_adv
= ADVERTISED_1000baseT_Half
;
3043 } else if (tp
->link_config
.speed
== SPEED_100
) {
3044 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
3045 new_adv
= ADVERTISED_100baseT_Full
;
3047 new_adv
= ADVERTISED_100baseT_Half
;
3049 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
3050 new_adv
= ADVERTISED_10baseT_Full
;
3052 new_adv
= ADVERTISED_10baseT_Half
;
3055 tg3_phy_autoneg_cfg(tp
, new_adv
,
3056 tp
->link_config
.flowctrl
);
3059 if (tp
->link_config
.autoneg
== AUTONEG_DISABLE
&&
3060 tp
->link_config
.speed
!= SPEED_INVALID
) {
3061 u32 bmcr
, orig_bmcr
;
3063 tp
->link_config
.active_speed
= tp
->link_config
.speed
;
3064 tp
->link_config
.active_duplex
= tp
->link_config
.duplex
;
3067 switch (tp
->link_config
.speed
) {
3073 bmcr
|= BMCR_SPEED100
;
3077 bmcr
|= TG3_BMCR_SPEED1000
;
3081 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
3082 bmcr
|= BMCR_FULLDPLX
;
3084 if (!tg3_readphy(tp
, MII_BMCR
, &orig_bmcr
) &&
3085 (bmcr
!= orig_bmcr
)) {
3086 tg3_writephy(tp
, MII_BMCR
, BMCR_LOOPBACK
);
3087 for (i
= 0; i
< 1500; i
++) {
3091 if (tg3_readphy(tp
, MII_BMSR
, &tmp
) ||
3092 tg3_readphy(tp
, MII_BMSR
, &tmp
))
3094 if (!(tmp
& BMSR_LSTATUS
)) {
3099 tg3_writephy(tp
, MII_BMCR
, bmcr
);
3103 tg3_writephy(tp
, MII_BMCR
,
3104 BMCR_ANENABLE
| BMCR_ANRESTART
);
3108 static int tg3_init_5401phy_dsp(struct tg3
*tp
)
3112 /* Turn off tap power management. */
3113 /* Set Extended packet length bit */
3114 err
= tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, 0x4c20);
3116 err
|= tg3_phydsp_write(tp
, 0x0012, 0x1804);
3117 err
|= tg3_phydsp_write(tp
, 0x0013, 0x1204);
3118 err
|= tg3_phydsp_write(tp
, 0x8006, 0x0132);
3119 err
|= tg3_phydsp_write(tp
, 0x8006, 0x0232);
3120 err
|= tg3_phydsp_write(tp
, 0x201f, 0x0a20);
3127 static int tg3_copper_is_advertising_all(struct tg3
*tp
, u32 mask
)
3129 u32 adv_reg
, all_mask
= 0;
3131 if (mask
& ADVERTISED_10baseT_Half
)
3132 all_mask
|= ADVERTISE_10HALF
;
3133 if (mask
& ADVERTISED_10baseT_Full
)
3134 all_mask
|= ADVERTISE_10FULL
;
3135 if (mask
& ADVERTISED_100baseT_Half
)
3136 all_mask
|= ADVERTISE_100HALF
;
3137 if (mask
& ADVERTISED_100baseT_Full
)
3138 all_mask
|= ADVERTISE_100FULL
;
3140 if (tg3_readphy(tp
, MII_ADVERTISE
, &adv_reg
))
3143 if ((adv_reg
& all_mask
) != all_mask
)
3145 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)) {
3149 if (mask
& ADVERTISED_1000baseT_Half
)
3150 all_mask
|= ADVERTISE_1000HALF
;
3151 if (mask
& ADVERTISED_1000baseT_Full
)
3152 all_mask
|= ADVERTISE_1000FULL
;
3154 if (tg3_readphy(tp
, MII_TG3_CTRL
, &tg3_ctrl
))
3157 if ((tg3_ctrl
& all_mask
) != all_mask
)
3163 static int tg3_adv_1000T_flowctrl_ok(struct tg3
*tp
, u32
*lcladv
, u32
*rmtadv
)
3167 if (tg3_readphy(tp
, MII_ADVERTISE
, lcladv
))
3170 curadv
= *lcladv
& (ADVERTISE_PAUSE_CAP
| ADVERTISE_PAUSE_ASYM
);
3171 reqadv
= tg3_advert_flowctrl_1000T(tp
->link_config
.flowctrl
);
3173 if (tp
->link_config
.active_duplex
== DUPLEX_FULL
) {
3174 if (curadv
!= reqadv
)
3177 if (tg3_flag(tp
, PAUSE_AUTONEG
))
3178 tg3_readphy(tp
, MII_LPA
, rmtadv
);
3180 /* Reprogram the advertisement register, even if it
3181 * does not affect the current link. If the link
3182 * gets renegotiated in the future, we can save an
3183 * additional renegotiation cycle by advertising
3184 * it correctly in the first place.
3186 if (curadv
!= reqadv
) {
3187 *lcladv
&= ~(ADVERTISE_PAUSE_CAP
|
3188 ADVERTISE_PAUSE_ASYM
);
3189 tg3_writephy(tp
, MII_ADVERTISE
, *lcladv
| reqadv
);
3196 static int tg3_setup_copper_phy(struct tg3
*tp
, int force_reset
)
3198 int current_link_up
;
3200 u32 lcl_adv
, rmt_adv
;
3208 (MAC_STATUS_SYNC_CHANGED
|
3209 MAC_STATUS_CFG_CHANGED
|
3210 MAC_STATUS_MI_COMPLETION
|
3211 MAC_STATUS_LNKSTATE_CHANGED
));
3214 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
3216 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
3220 tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_PWRCTL
, 0);
3222 /* Some third-party PHYs need to be reset on link going
3225 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
3226 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
||
3227 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) &&
3228 netif_carrier_ok(tp
->dev
)) {
3229 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
3230 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
3231 !(bmsr
& BMSR_LSTATUS
))
3237 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5401
) {
3238 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
3239 if (tg3_readphy(tp
, MII_BMSR
, &bmsr
) ||
3240 !tg3_flag(tp
, INIT_COMPLETE
))
3243 if (!(bmsr
& BMSR_LSTATUS
)) {
3244 err
= tg3_init_5401phy_dsp(tp
);
3248 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
3249 for (i
= 0; i
< 1000; i
++) {
3251 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
3252 (bmsr
& BMSR_LSTATUS
)) {
3258 if ((tp
->phy_id
& TG3_PHY_ID_REV_MASK
) ==
3259 TG3_PHY_REV_BCM5401_B0
&&
3260 !(bmsr
& BMSR_LSTATUS
) &&
3261 tp
->link_config
.active_speed
== SPEED_1000
) {
3262 err
= tg3_phy_reset(tp
);
3264 err
= tg3_init_5401phy_dsp(tp
);
3269 } else if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
3270 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
) {
3271 /* 5701 {A0,B0} CRC bug workaround */
3272 tg3_writephy(tp
, 0x15, 0x0a75);
3273 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8c68);
3274 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8d68);
3275 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8c68);
3278 /* Clear pending interrupts... */
3279 tg3_readphy(tp
, MII_TG3_ISTAT
, &val
);
3280 tg3_readphy(tp
, MII_TG3_ISTAT
, &val
);
3282 if (tp
->phy_flags
& TG3_PHYFLG_USE_MI_INTERRUPT
)
3283 tg3_writephy(tp
, MII_TG3_IMASK
, ~MII_TG3_INT_LINKCHG
);
3284 else if (!(tp
->phy_flags
& TG3_PHYFLG_IS_FET
))
3285 tg3_writephy(tp
, MII_TG3_IMASK
, ~0);
3287 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
3288 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
3289 if (tp
->led_ctrl
== LED_CTRL_MODE_PHY_1
)
3290 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
3291 MII_TG3_EXT_CTRL_LNK3_LED_MODE
);
3293 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, 0);
3296 current_link_up
= 0;
3297 current_speed
= SPEED_INVALID
;
3298 current_duplex
= DUPLEX_INVALID
;
3300 if (tp
->phy_flags
& TG3_PHYFLG_CAPACITIVE_COUPLING
) {
3301 err
= tg3_phy_auxctl_read(tp
,
3302 MII_TG3_AUXCTL_SHDWSEL_MISCTEST
,
3304 if (!err
&& !(val
& (1 << 10))) {
3305 tg3_phy_auxctl_write(tp
,
3306 MII_TG3_AUXCTL_SHDWSEL_MISCTEST
,
3313 for (i
= 0; i
< 100; i
++) {
3314 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
3315 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
3316 (bmsr
& BMSR_LSTATUS
))
3321 if (bmsr
& BMSR_LSTATUS
) {
3324 tg3_readphy(tp
, MII_TG3_AUX_STAT
, &aux_stat
);
3325 for (i
= 0; i
< 2000; i
++) {
3327 if (!tg3_readphy(tp
, MII_TG3_AUX_STAT
, &aux_stat
) &&
3332 tg3_aux_stat_to_speed_duplex(tp
, aux_stat
,
3337 for (i
= 0; i
< 200; i
++) {
3338 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
3339 if (tg3_readphy(tp
, MII_BMCR
, &bmcr
))
3341 if (bmcr
&& bmcr
!= 0x7fff)
3349 tp
->link_config
.active_speed
= current_speed
;
3350 tp
->link_config
.active_duplex
= current_duplex
;
3352 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
3353 if ((bmcr
& BMCR_ANENABLE
) &&
3354 tg3_copper_is_advertising_all(tp
,
3355 tp
->link_config
.advertising
)) {
3356 if (tg3_adv_1000T_flowctrl_ok(tp
, &lcl_adv
,
3358 current_link_up
= 1;
3361 if (!(bmcr
& BMCR_ANENABLE
) &&
3362 tp
->link_config
.speed
== current_speed
&&
3363 tp
->link_config
.duplex
== current_duplex
&&
3364 tp
->link_config
.flowctrl
==
3365 tp
->link_config
.active_flowctrl
) {
3366 current_link_up
= 1;
3370 if (current_link_up
== 1 &&
3371 tp
->link_config
.active_duplex
== DUPLEX_FULL
)
3372 tg3_setup_flow_control(tp
, lcl_adv
, rmt_adv
);
3376 if (current_link_up
== 0 || (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)) {
3377 tg3_phy_copper_begin(tp
);
3379 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
3380 if ((!tg3_readphy(tp
, MII_BMSR
, &bmsr
) && (bmsr
& BMSR_LSTATUS
)) ||
3381 (tp
->mac_mode
& MAC_MODE_PORT_INT_LPBACK
))
3382 current_link_up
= 1;
3385 tp
->mac_mode
&= ~MAC_MODE_PORT_MODE_MASK
;
3386 if (current_link_up
== 1) {
3387 if (tp
->link_config
.active_speed
== SPEED_100
||
3388 tp
->link_config
.active_speed
== SPEED_10
)
3389 tp
->mac_mode
|= MAC_MODE_PORT_MODE_MII
;
3391 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
3392 } else if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
)
3393 tp
->mac_mode
|= MAC_MODE_PORT_MODE_MII
;
3395 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
3397 tp
->mac_mode
&= ~MAC_MODE_HALF_DUPLEX
;
3398 if (tp
->link_config
.active_duplex
== DUPLEX_HALF
)
3399 tp
->mac_mode
|= MAC_MODE_HALF_DUPLEX
;
3401 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
) {
3402 if (current_link_up
== 1 &&
3403 tg3_5700_link_polarity(tp
, tp
->link_config
.active_speed
))
3404 tp
->mac_mode
|= MAC_MODE_LINK_POLARITY
;
3406 tp
->mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
3409 /* ??? Without this setting Netgear GA302T PHY does not
3410 * ??? send/receive packets...
3412 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5411
&&
3413 tp
->pci_chip_rev_id
== CHIPREV_ID_5700_ALTIMA
) {
3414 tp
->mi_mode
|= MAC_MI_MODE_AUTO_POLL
;
3415 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
3419 tw32_f(MAC_MODE
, tp
->mac_mode
);
3422 tg3_phy_eee_adjust(tp
, current_link_up
);
3424 if (tg3_flag(tp
, USE_LINKCHG_REG
)) {
3425 /* Polled via timer. */
3426 tw32_f(MAC_EVENT
, 0);
3428 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
3432 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
&&
3433 current_link_up
== 1 &&
3434 tp
->link_config
.active_speed
== SPEED_1000
&&
3435 (tg3_flag(tp
, PCIX_MODE
) || tg3_flag(tp
, PCI_HIGH_SPEED
))) {
3438 (MAC_STATUS_SYNC_CHANGED
|
3439 MAC_STATUS_CFG_CHANGED
));
3442 NIC_SRAM_FIRMWARE_MBOX
,
3443 NIC_SRAM_FIRMWARE_MBOX_MAGIC2
);
3446 /* Prevent send BD corruption. */
3447 if (tg3_flag(tp
, CLKREQ_BUG
)) {
3448 u16 oldlnkctl
, newlnkctl
;
3450 pci_read_config_word(tp
->pdev
,
3451 tp
->pcie_cap
+ PCI_EXP_LNKCTL
,
3453 if (tp
->link_config
.active_speed
== SPEED_100
||
3454 tp
->link_config
.active_speed
== SPEED_10
)
3455 newlnkctl
= oldlnkctl
& ~PCI_EXP_LNKCTL_CLKREQ_EN
;
3457 newlnkctl
= oldlnkctl
| PCI_EXP_LNKCTL_CLKREQ_EN
;
3458 if (newlnkctl
!= oldlnkctl
)
3459 pci_write_config_word(tp
->pdev
,
3460 tp
->pcie_cap
+ PCI_EXP_LNKCTL
,
3464 if (current_link_up
!= netif_carrier_ok(tp
->dev
)) {
3465 if (current_link_up
)
3466 netif_carrier_on(tp
->dev
);
3468 netif_carrier_off(tp
->dev
);
3469 tg3_link_report(tp
);
3475 struct tg3_fiber_aneginfo
{
3477 #define ANEG_STATE_UNKNOWN 0
3478 #define ANEG_STATE_AN_ENABLE 1
3479 #define ANEG_STATE_RESTART_INIT 2
3480 #define ANEG_STATE_RESTART 3
3481 #define ANEG_STATE_DISABLE_LINK_OK 4
3482 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3483 #define ANEG_STATE_ABILITY_DETECT 6
3484 #define ANEG_STATE_ACK_DETECT_INIT 7
3485 #define ANEG_STATE_ACK_DETECT 8
3486 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3487 #define ANEG_STATE_COMPLETE_ACK 10
3488 #define ANEG_STATE_IDLE_DETECT_INIT 11
3489 #define ANEG_STATE_IDLE_DETECT 12
3490 #define ANEG_STATE_LINK_OK 13
3491 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3492 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3495 #define MR_AN_ENABLE 0x00000001
3496 #define MR_RESTART_AN 0x00000002
3497 #define MR_AN_COMPLETE 0x00000004
3498 #define MR_PAGE_RX 0x00000008
3499 #define MR_NP_LOADED 0x00000010
3500 #define MR_TOGGLE_TX 0x00000020
3501 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3502 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3503 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3504 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3505 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3506 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3507 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3508 #define MR_TOGGLE_RX 0x00002000
3509 #define MR_NP_RX 0x00004000
3511 #define MR_LINK_OK 0x80000000
3513 unsigned long link_time
, cur_time
;
3515 u32 ability_match_cfg
;
3516 int ability_match_count
;
3518 char ability_match
, idle_match
, ack_match
;
3520 u32 txconfig
, rxconfig
;
3521 #define ANEG_CFG_NP 0x00000080
3522 #define ANEG_CFG_ACK 0x00000040
3523 #define ANEG_CFG_RF2 0x00000020
3524 #define ANEG_CFG_RF1 0x00000010
3525 #define ANEG_CFG_PS2 0x00000001
3526 #define ANEG_CFG_PS1 0x00008000
3527 #define ANEG_CFG_HD 0x00004000
3528 #define ANEG_CFG_FD 0x00002000
3529 #define ANEG_CFG_INVAL 0x00001f06
3534 #define ANEG_TIMER_ENAB 2
3535 #define ANEG_FAILED -1
3537 #define ANEG_STATE_SETTLE_TIME 10000
3539 static int tg3_fiber_aneg_smachine(struct tg3
*tp
,
3540 struct tg3_fiber_aneginfo
*ap
)
3543 unsigned long delta
;
3547 if (ap
->state
== ANEG_STATE_UNKNOWN
) {
3551 ap
->ability_match_cfg
= 0;
3552 ap
->ability_match_count
= 0;
3553 ap
->ability_match
= 0;
3559 if (tr32(MAC_STATUS
) & MAC_STATUS_RCVD_CFG
) {
3560 rx_cfg_reg
= tr32(MAC_RX_AUTO_NEG
);
3562 if (rx_cfg_reg
!= ap
->ability_match_cfg
) {
3563 ap
->ability_match_cfg
= rx_cfg_reg
;
3564 ap
->ability_match
= 0;
3565 ap
->ability_match_count
= 0;
3567 if (++ap
->ability_match_count
> 1) {
3568 ap
->ability_match
= 1;
3569 ap
->ability_match_cfg
= rx_cfg_reg
;
3572 if (rx_cfg_reg
& ANEG_CFG_ACK
)
3580 ap
->ability_match_cfg
= 0;
3581 ap
->ability_match_count
= 0;
3582 ap
->ability_match
= 0;
3588 ap
->rxconfig
= rx_cfg_reg
;
3591 switch (ap
->state
) {
3592 case ANEG_STATE_UNKNOWN
:
3593 if (ap
->flags
& (MR_AN_ENABLE
| MR_RESTART_AN
))
3594 ap
->state
= ANEG_STATE_AN_ENABLE
;
3597 case ANEG_STATE_AN_ENABLE
:
3598 ap
->flags
&= ~(MR_AN_COMPLETE
| MR_PAGE_RX
);
3599 if (ap
->flags
& MR_AN_ENABLE
) {
3602 ap
->ability_match_cfg
= 0;
3603 ap
->ability_match_count
= 0;
3604 ap
->ability_match
= 0;
3608 ap
->state
= ANEG_STATE_RESTART_INIT
;
3610 ap
->state
= ANEG_STATE_DISABLE_LINK_OK
;
3614 case ANEG_STATE_RESTART_INIT
:
3615 ap
->link_time
= ap
->cur_time
;
3616 ap
->flags
&= ~(MR_NP_LOADED
);
3618 tw32(MAC_TX_AUTO_NEG
, 0);
3619 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
3620 tw32_f(MAC_MODE
, tp
->mac_mode
);
3623 ret
= ANEG_TIMER_ENAB
;
3624 ap
->state
= ANEG_STATE_RESTART
;
3627 case ANEG_STATE_RESTART
:
3628 delta
= ap
->cur_time
- ap
->link_time
;
3629 if (delta
> ANEG_STATE_SETTLE_TIME
)
3630 ap
->state
= ANEG_STATE_ABILITY_DETECT_INIT
;
3632 ret
= ANEG_TIMER_ENAB
;
3635 case ANEG_STATE_DISABLE_LINK_OK
:
3639 case ANEG_STATE_ABILITY_DETECT_INIT
:
3640 ap
->flags
&= ~(MR_TOGGLE_TX
);
3641 ap
->txconfig
= ANEG_CFG_FD
;
3642 flowctrl
= tg3_advert_flowctrl_1000X(tp
->link_config
.flowctrl
);
3643 if (flowctrl
& ADVERTISE_1000XPAUSE
)
3644 ap
->txconfig
|= ANEG_CFG_PS1
;
3645 if (flowctrl
& ADVERTISE_1000XPSE_ASYM
)
3646 ap
->txconfig
|= ANEG_CFG_PS2
;
3647 tw32(MAC_TX_AUTO_NEG
, ap
->txconfig
);
3648 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
3649 tw32_f(MAC_MODE
, tp
->mac_mode
);
3652 ap
->state
= ANEG_STATE_ABILITY_DETECT
;
3655 case ANEG_STATE_ABILITY_DETECT
:
3656 if (ap
->ability_match
!= 0 && ap
->rxconfig
!= 0)
3657 ap
->state
= ANEG_STATE_ACK_DETECT_INIT
;
3660 case ANEG_STATE_ACK_DETECT_INIT
:
3661 ap
->txconfig
|= ANEG_CFG_ACK
;
3662 tw32(MAC_TX_AUTO_NEG
, ap
->txconfig
);
3663 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
3664 tw32_f(MAC_MODE
, tp
->mac_mode
);
3667 ap
->state
= ANEG_STATE_ACK_DETECT
;
3670 case ANEG_STATE_ACK_DETECT
:
3671 if (ap
->ack_match
!= 0) {
3672 if ((ap
->rxconfig
& ~ANEG_CFG_ACK
) ==
3673 (ap
->ability_match_cfg
& ~ANEG_CFG_ACK
)) {
3674 ap
->state
= ANEG_STATE_COMPLETE_ACK_INIT
;
3676 ap
->state
= ANEG_STATE_AN_ENABLE
;
3678 } else if (ap
->ability_match
!= 0 &&
3679 ap
->rxconfig
== 0) {
3680 ap
->state
= ANEG_STATE_AN_ENABLE
;
3684 case ANEG_STATE_COMPLETE_ACK_INIT
:
3685 if (ap
->rxconfig
& ANEG_CFG_INVAL
) {
3689 ap
->flags
&= ~(MR_LP_ADV_FULL_DUPLEX
|
3690 MR_LP_ADV_HALF_DUPLEX
|
3691 MR_LP_ADV_SYM_PAUSE
|
3692 MR_LP_ADV_ASYM_PAUSE
|
3693 MR_LP_ADV_REMOTE_FAULT1
|
3694 MR_LP_ADV_REMOTE_FAULT2
|
3695 MR_LP_ADV_NEXT_PAGE
|
3698 if (ap
->rxconfig
& ANEG_CFG_FD
)
3699 ap
->flags
|= MR_LP_ADV_FULL_DUPLEX
;
3700 if (ap
->rxconfig
& ANEG_CFG_HD
)
3701 ap
->flags
|= MR_LP_ADV_HALF_DUPLEX
;
3702 if (ap
->rxconfig
& ANEG_CFG_PS1
)
3703 ap
->flags
|= MR_LP_ADV_SYM_PAUSE
;
3704 if (ap
->rxconfig
& ANEG_CFG_PS2
)
3705 ap
->flags
|= MR_LP_ADV_ASYM_PAUSE
;
3706 if (ap
->rxconfig
& ANEG_CFG_RF1
)
3707 ap
->flags
|= MR_LP_ADV_REMOTE_FAULT1
;
3708 if (ap
->rxconfig
& ANEG_CFG_RF2
)
3709 ap
->flags
|= MR_LP_ADV_REMOTE_FAULT2
;
3710 if (ap
->rxconfig
& ANEG_CFG_NP
)
3711 ap
->flags
|= MR_LP_ADV_NEXT_PAGE
;
3713 ap
->link_time
= ap
->cur_time
;
3715 ap
->flags
^= (MR_TOGGLE_TX
);
3716 if (ap
->rxconfig
& 0x0008)
3717 ap
->flags
|= MR_TOGGLE_RX
;
3718 if (ap
->rxconfig
& ANEG_CFG_NP
)
3719 ap
->flags
|= MR_NP_RX
;
3720 ap
->flags
|= MR_PAGE_RX
;
3722 ap
->state
= ANEG_STATE_COMPLETE_ACK
;
3723 ret
= ANEG_TIMER_ENAB
;
3726 case ANEG_STATE_COMPLETE_ACK
:
3727 if (ap
->ability_match
!= 0 &&
3728 ap
->rxconfig
== 0) {
3729 ap
->state
= ANEG_STATE_AN_ENABLE
;
3732 delta
= ap
->cur_time
- ap
->link_time
;
3733 if (delta
> ANEG_STATE_SETTLE_TIME
) {
3734 if (!(ap
->flags
& (MR_LP_ADV_NEXT_PAGE
))) {
3735 ap
->state
= ANEG_STATE_IDLE_DETECT_INIT
;
3737 if ((ap
->txconfig
& ANEG_CFG_NP
) == 0 &&
3738 !(ap
->flags
& MR_NP_RX
)) {
3739 ap
->state
= ANEG_STATE_IDLE_DETECT_INIT
;
3747 case ANEG_STATE_IDLE_DETECT_INIT
:
3748 ap
->link_time
= ap
->cur_time
;
3749 tp
->mac_mode
&= ~MAC_MODE_SEND_CONFIGS
;
3750 tw32_f(MAC_MODE
, tp
->mac_mode
);
3753 ap
->state
= ANEG_STATE_IDLE_DETECT
;
3754 ret
= ANEG_TIMER_ENAB
;
3757 case ANEG_STATE_IDLE_DETECT
:
3758 if (ap
->ability_match
!= 0 &&
3759 ap
->rxconfig
== 0) {
3760 ap
->state
= ANEG_STATE_AN_ENABLE
;
3763 delta
= ap
->cur_time
- ap
->link_time
;
3764 if (delta
> ANEG_STATE_SETTLE_TIME
) {
3765 /* XXX another gem from the Broadcom driver :( */
3766 ap
->state
= ANEG_STATE_LINK_OK
;
3770 case ANEG_STATE_LINK_OK
:
3771 ap
->flags
|= (MR_AN_COMPLETE
| MR_LINK_OK
);
3775 case ANEG_STATE_NEXT_PAGE_WAIT_INIT
:
3776 /* ??? unimplemented */
3779 case ANEG_STATE_NEXT_PAGE_WAIT
:
3780 /* ??? unimplemented */
3791 static int fiber_autoneg(struct tg3
*tp
, u32
*txflags
, u32
*rxflags
)
3794 struct tg3_fiber_aneginfo aninfo
;
3795 int status
= ANEG_FAILED
;
3799 tw32_f(MAC_TX_AUTO_NEG
, 0);
3801 tmp
= tp
->mac_mode
& ~MAC_MODE_PORT_MODE_MASK
;
3802 tw32_f(MAC_MODE
, tmp
| MAC_MODE_PORT_MODE_GMII
);
3805 tw32_f(MAC_MODE
, tp
->mac_mode
| MAC_MODE_SEND_CONFIGS
);
3808 memset(&aninfo
, 0, sizeof(aninfo
));
3809 aninfo
.flags
|= MR_AN_ENABLE
;
3810 aninfo
.state
= ANEG_STATE_UNKNOWN
;
3811 aninfo
.cur_time
= 0;
3813 while (++tick
< 195000) {
3814 status
= tg3_fiber_aneg_smachine(tp
, &aninfo
);
3815 if (status
== ANEG_DONE
|| status
== ANEG_FAILED
)
3821 tp
->mac_mode
&= ~MAC_MODE_SEND_CONFIGS
;
3822 tw32_f(MAC_MODE
, tp
->mac_mode
);
3825 *txflags
= aninfo
.txconfig
;
3826 *rxflags
= aninfo
.flags
;
3828 if (status
== ANEG_DONE
&&
3829 (aninfo
.flags
& (MR_AN_COMPLETE
| MR_LINK_OK
|
3830 MR_LP_ADV_FULL_DUPLEX
)))
3836 static void tg3_init_bcm8002(struct tg3
*tp
)
3838 u32 mac_status
= tr32(MAC_STATUS
);
3841 /* Reset when initting first time or we have a link. */
3842 if (tg3_flag(tp
, INIT_COMPLETE
) &&
3843 !(mac_status
& MAC_STATUS_PCS_SYNCED
))
3846 /* Set PLL lock range. */
3847 tg3_writephy(tp
, 0x16, 0x8007);
3850 tg3_writephy(tp
, MII_BMCR
, BMCR_RESET
);
3852 /* Wait for reset to complete. */
3853 /* XXX schedule_timeout() ... */
3854 for (i
= 0; i
< 500; i
++)
3857 /* Config mode; select PMA/Ch 1 regs. */
3858 tg3_writephy(tp
, 0x10, 0x8411);
3860 /* Enable auto-lock and comdet, select txclk for tx. */
3861 tg3_writephy(tp
, 0x11, 0x0a10);
3863 tg3_writephy(tp
, 0x18, 0x00a0);
3864 tg3_writephy(tp
, 0x16, 0x41ff);
3866 /* Assert and deassert POR. */
3867 tg3_writephy(tp
, 0x13, 0x0400);
3869 tg3_writephy(tp
, 0x13, 0x0000);
3871 tg3_writephy(tp
, 0x11, 0x0a50);
3873 tg3_writephy(tp
, 0x11, 0x0a10);
3875 /* Wait for signal to stabilize */
3876 /* XXX schedule_timeout() ... */
3877 for (i
= 0; i
< 15000; i
++)
3880 /* Deselect the channel register so we can read the PHYID
3883 tg3_writephy(tp
, 0x10, 0x8011);
3886 static int tg3_setup_fiber_hw_autoneg(struct tg3
*tp
, u32 mac_status
)
3889 u32 sg_dig_ctrl
, sg_dig_status
;
3890 u32 serdes_cfg
, expected_sg_dig_ctrl
;
3891 int workaround
, port_a
;
3892 int current_link_up
;
3895 expected_sg_dig_ctrl
= 0;
3898 current_link_up
= 0;
3900 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5704_A0
&&
3901 tp
->pci_chip_rev_id
!= CHIPREV_ID_5704_A1
) {
3903 if (tr32(TG3PCI_DUAL_MAC_CTRL
) & DUAL_MAC_CTRL_ID
)
3906 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3907 /* preserve bits 20-23 for voltage regulator */
3908 serdes_cfg
= tr32(MAC_SERDES_CFG
) & 0x00f06fff;
3911 sg_dig_ctrl
= tr32(SG_DIG_CTRL
);
3913 if (tp
->link_config
.autoneg
!= AUTONEG_ENABLE
) {
3914 if (sg_dig_ctrl
& SG_DIG_USING_HW_AUTONEG
) {
3916 u32 val
= serdes_cfg
;
3922 tw32_f(MAC_SERDES_CFG
, val
);
3925 tw32_f(SG_DIG_CTRL
, SG_DIG_COMMON_SETUP
);
3927 if (mac_status
& MAC_STATUS_PCS_SYNCED
) {
3928 tg3_setup_flow_control(tp
, 0, 0);
3929 current_link_up
= 1;
3934 /* Want auto-negotiation. */
3935 expected_sg_dig_ctrl
= SG_DIG_USING_HW_AUTONEG
| SG_DIG_COMMON_SETUP
;
3937 flowctrl
= tg3_advert_flowctrl_1000X(tp
->link_config
.flowctrl
);
3938 if (flowctrl
& ADVERTISE_1000XPAUSE
)
3939 expected_sg_dig_ctrl
|= SG_DIG_PAUSE_CAP
;
3940 if (flowctrl
& ADVERTISE_1000XPSE_ASYM
)
3941 expected_sg_dig_ctrl
|= SG_DIG_ASYM_PAUSE
;
3943 if (sg_dig_ctrl
!= expected_sg_dig_ctrl
) {
3944 if ((tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
) &&
3945 tp
->serdes_counter
&&
3946 ((mac_status
& (MAC_STATUS_PCS_SYNCED
|
3947 MAC_STATUS_RCVD_CFG
)) ==
3948 MAC_STATUS_PCS_SYNCED
)) {
3949 tp
->serdes_counter
--;
3950 current_link_up
= 1;
3955 tw32_f(MAC_SERDES_CFG
, serdes_cfg
| 0xc011000);
3956 tw32_f(SG_DIG_CTRL
, expected_sg_dig_ctrl
| SG_DIG_SOFT_RESET
);
3958 tw32_f(SG_DIG_CTRL
, expected_sg_dig_ctrl
);
3960 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5704S
;
3961 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
3962 } else if (mac_status
& (MAC_STATUS_PCS_SYNCED
|
3963 MAC_STATUS_SIGNAL_DET
)) {
3964 sg_dig_status
= tr32(SG_DIG_STATUS
);
3965 mac_status
= tr32(MAC_STATUS
);
3967 if ((sg_dig_status
& SG_DIG_AUTONEG_COMPLETE
) &&
3968 (mac_status
& MAC_STATUS_PCS_SYNCED
)) {
3969 u32 local_adv
= 0, remote_adv
= 0;
3971 if (sg_dig_ctrl
& SG_DIG_PAUSE_CAP
)
3972 local_adv
|= ADVERTISE_1000XPAUSE
;
3973 if (sg_dig_ctrl
& SG_DIG_ASYM_PAUSE
)
3974 local_adv
|= ADVERTISE_1000XPSE_ASYM
;
3976 if (sg_dig_status
& SG_DIG_PARTNER_PAUSE_CAPABLE
)
3977 remote_adv
|= LPA_1000XPAUSE
;
3978 if (sg_dig_status
& SG_DIG_PARTNER_ASYM_PAUSE
)
3979 remote_adv
|= LPA_1000XPAUSE_ASYM
;
3981 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
3982 current_link_up
= 1;
3983 tp
->serdes_counter
= 0;
3984 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
3985 } else if (!(sg_dig_status
& SG_DIG_AUTONEG_COMPLETE
)) {
3986 if (tp
->serdes_counter
)
3987 tp
->serdes_counter
--;
3990 u32 val
= serdes_cfg
;
3997 tw32_f(MAC_SERDES_CFG
, val
);
4000 tw32_f(SG_DIG_CTRL
, SG_DIG_COMMON_SETUP
);
4003 /* Link parallel detection - link is up */
4004 /* only if we have PCS_SYNC and not */
4005 /* receiving config code words */
4006 mac_status
= tr32(MAC_STATUS
);
4007 if ((mac_status
& MAC_STATUS_PCS_SYNCED
) &&
4008 !(mac_status
& MAC_STATUS_RCVD_CFG
)) {
4009 tg3_setup_flow_control(tp
, 0, 0);
4010 current_link_up
= 1;
4012 TG3_PHYFLG_PARALLEL_DETECT
;
4013 tp
->serdes_counter
=
4014 SERDES_PARALLEL_DET_TIMEOUT
;
4016 goto restart_autoneg
;
4020 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5704S
;
4021 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
4025 return current_link_up
;
4028 static int tg3_setup_fiber_by_hand(struct tg3
*tp
, u32 mac_status
)
4030 int current_link_up
= 0;
4032 if (!(mac_status
& MAC_STATUS_PCS_SYNCED
))
4035 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
4036 u32 txflags
, rxflags
;
4039 if (fiber_autoneg(tp
, &txflags
, &rxflags
)) {
4040 u32 local_adv
= 0, remote_adv
= 0;
4042 if (txflags
& ANEG_CFG_PS1
)
4043 local_adv
|= ADVERTISE_1000XPAUSE
;
4044 if (txflags
& ANEG_CFG_PS2
)
4045 local_adv
|= ADVERTISE_1000XPSE_ASYM
;
4047 if (rxflags
& MR_LP_ADV_SYM_PAUSE
)
4048 remote_adv
|= LPA_1000XPAUSE
;
4049 if (rxflags
& MR_LP_ADV_ASYM_PAUSE
)
4050 remote_adv
|= LPA_1000XPAUSE_ASYM
;
4052 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
4054 current_link_up
= 1;
4056 for (i
= 0; i
< 30; i
++) {
4059 (MAC_STATUS_SYNC_CHANGED
|
4060 MAC_STATUS_CFG_CHANGED
));
4062 if ((tr32(MAC_STATUS
) &
4063 (MAC_STATUS_SYNC_CHANGED
|
4064 MAC_STATUS_CFG_CHANGED
)) == 0)
4068 mac_status
= tr32(MAC_STATUS
);
4069 if (current_link_up
== 0 &&
4070 (mac_status
& MAC_STATUS_PCS_SYNCED
) &&
4071 !(mac_status
& MAC_STATUS_RCVD_CFG
))
4072 current_link_up
= 1;
4074 tg3_setup_flow_control(tp
, 0, 0);
4076 /* Forcing 1000FD link up. */
4077 current_link_up
= 1;
4079 tw32_f(MAC_MODE
, (tp
->mac_mode
| MAC_MODE_SEND_CONFIGS
));
4082 tw32_f(MAC_MODE
, tp
->mac_mode
);
4087 return current_link_up
;
4090 static int tg3_setup_fiber_phy(struct tg3
*tp
, int force_reset
)
4093 u16 orig_active_speed
;
4094 u8 orig_active_duplex
;
4096 int current_link_up
;
4099 orig_pause_cfg
= tp
->link_config
.active_flowctrl
;
4100 orig_active_speed
= tp
->link_config
.active_speed
;
4101 orig_active_duplex
= tp
->link_config
.active_duplex
;
4103 if (!tg3_flag(tp
, HW_AUTONEG
) &&
4104 netif_carrier_ok(tp
->dev
) &&
4105 tg3_flag(tp
, INIT_COMPLETE
)) {
4106 mac_status
= tr32(MAC_STATUS
);
4107 mac_status
&= (MAC_STATUS_PCS_SYNCED
|
4108 MAC_STATUS_SIGNAL_DET
|
4109 MAC_STATUS_CFG_CHANGED
|
4110 MAC_STATUS_RCVD_CFG
);
4111 if (mac_status
== (MAC_STATUS_PCS_SYNCED
|
4112 MAC_STATUS_SIGNAL_DET
)) {
4113 tw32_f(MAC_STATUS
, (MAC_STATUS_SYNC_CHANGED
|
4114 MAC_STATUS_CFG_CHANGED
));
4119 tw32_f(MAC_TX_AUTO_NEG
, 0);
4121 tp
->mac_mode
&= ~(MAC_MODE_PORT_MODE_MASK
| MAC_MODE_HALF_DUPLEX
);
4122 tp
->mac_mode
|= MAC_MODE_PORT_MODE_TBI
;
4123 tw32_f(MAC_MODE
, tp
->mac_mode
);
4126 if (tp
->phy_id
== TG3_PHY_ID_BCM8002
)
4127 tg3_init_bcm8002(tp
);
4129 /* Enable link change event even when serdes polling. */
4130 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
4133 current_link_up
= 0;
4134 mac_status
= tr32(MAC_STATUS
);
4136 if (tg3_flag(tp
, HW_AUTONEG
))
4137 current_link_up
= tg3_setup_fiber_hw_autoneg(tp
, mac_status
);
4139 current_link_up
= tg3_setup_fiber_by_hand(tp
, mac_status
);
4141 tp
->napi
[0].hw_status
->status
=
4142 (SD_STATUS_UPDATED
|
4143 (tp
->napi
[0].hw_status
->status
& ~SD_STATUS_LINK_CHG
));
4145 for (i
= 0; i
< 100; i
++) {
4146 tw32_f(MAC_STATUS
, (MAC_STATUS_SYNC_CHANGED
|
4147 MAC_STATUS_CFG_CHANGED
));
4149 if ((tr32(MAC_STATUS
) & (MAC_STATUS_SYNC_CHANGED
|
4150 MAC_STATUS_CFG_CHANGED
|
4151 MAC_STATUS_LNKSTATE_CHANGED
)) == 0)
4155 mac_status
= tr32(MAC_STATUS
);
4156 if ((mac_status
& MAC_STATUS_PCS_SYNCED
) == 0) {
4157 current_link_up
= 0;
4158 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
&&
4159 tp
->serdes_counter
== 0) {
4160 tw32_f(MAC_MODE
, (tp
->mac_mode
|
4161 MAC_MODE_SEND_CONFIGS
));
4163 tw32_f(MAC_MODE
, tp
->mac_mode
);
4167 if (current_link_up
== 1) {
4168 tp
->link_config
.active_speed
= SPEED_1000
;
4169 tp
->link_config
.active_duplex
= DUPLEX_FULL
;
4170 tw32(MAC_LED_CTRL
, (tp
->led_ctrl
|
4171 LED_CTRL_LNKLED_OVERRIDE
|
4172 LED_CTRL_1000MBPS_ON
));
4174 tp
->link_config
.active_speed
= SPEED_INVALID
;
4175 tp
->link_config
.active_duplex
= DUPLEX_INVALID
;
4176 tw32(MAC_LED_CTRL
, (tp
->led_ctrl
|
4177 LED_CTRL_LNKLED_OVERRIDE
|
4178 LED_CTRL_TRAFFIC_OVERRIDE
));
4181 if (current_link_up
!= netif_carrier_ok(tp
->dev
)) {
4182 if (current_link_up
)
4183 netif_carrier_on(tp
->dev
);
4185 netif_carrier_off(tp
->dev
);
4186 tg3_link_report(tp
);
4188 u32 now_pause_cfg
= tp
->link_config
.active_flowctrl
;
4189 if (orig_pause_cfg
!= now_pause_cfg
||
4190 orig_active_speed
!= tp
->link_config
.active_speed
||
4191 orig_active_duplex
!= tp
->link_config
.active_duplex
)
4192 tg3_link_report(tp
);
4198 static int tg3_setup_fiber_mii_phy(struct tg3
*tp
, int force_reset
)
4200 int current_link_up
, err
= 0;
4204 u32 local_adv
, remote_adv
;
4206 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
4207 tw32_f(MAC_MODE
, tp
->mac_mode
);
4213 (MAC_STATUS_SYNC_CHANGED
|
4214 MAC_STATUS_CFG_CHANGED
|
4215 MAC_STATUS_MI_COMPLETION
|
4216 MAC_STATUS_LNKSTATE_CHANGED
));
4222 current_link_up
= 0;
4223 current_speed
= SPEED_INVALID
;
4224 current_duplex
= DUPLEX_INVALID
;
4226 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4227 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4228 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) {
4229 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
4230 bmsr
|= BMSR_LSTATUS
;
4232 bmsr
&= ~BMSR_LSTATUS
;
4235 err
|= tg3_readphy(tp
, MII_BMCR
, &bmcr
);
4237 if ((tp
->link_config
.autoneg
== AUTONEG_ENABLE
) && !force_reset
&&
4238 (tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
)) {
4239 /* do nothing, just check for link up at the end */
4240 } else if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
4243 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &adv
);
4244 new_adv
= adv
& ~(ADVERTISE_1000XFULL
| ADVERTISE_1000XHALF
|
4245 ADVERTISE_1000XPAUSE
|
4246 ADVERTISE_1000XPSE_ASYM
|
4249 new_adv
|= tg3_advert_flowctrl_1000X(tp
->link_config
.flowctrl
);
4251 if (tp
->link_config
.advertising
& ADVERTISED_1000baseT_Half
)
4252 new_adv
|= ADVERTISE_1000XHALF
;
4253 if (tp
->link_config
.advertising
& ADVERTISED_1000baseT_Full
)
4254 new_adv
|= ADVERTISE_1000XFULL
;
4256 if ((new_adv
!= adv
) || !(bmcr
& BMCR_ANENABLE
)) {
4257 tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
4258 bmcr
|= BMCR_ANENABLE
| BMCR_ANRESTART
;
4259 tg3_writephy(tp
, MII_BMCR
, bmcr
);
4261 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
4262 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5714S
;
4263 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
4270 bmcr
&= ~BMCR_SPEED1000
;
4271 new_bmcr
= bmcr
& ~(BMCR_ANENABLE
| BMCR_FULLDPLX
);
4273 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
4274 new_bmcr
|= BMCR_FULLDPLX
;
4276 if (new_bmcr
!= bmcr
) {
4277 /* BMCR_SPEED1000 is a reserved bit that needs
4278 * to be set on write.
4280 new_bmcr
|= BMCR_SPEED1000
;
4282 /* Force a linkdown */
4283 if (netif_carrier_ok(tp
->dev
)) {
4286 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &adv
);
4287 adv
&= ~(ADVERTISE_1000XFULL
|
4288 ADVERTISE_1000XHALF
|
4290 tg3_writephy(tp
, MII_ADVERTISE
, adv
);
4291 tg3_writephy(tp
, MII_BMCR
, bmcr
|
4295 netif_carrier_off(tp
->dev
);
4297 tg3_writephy(tp
, MII_BMCR
, new_bmcr
);
4299 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4300 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4301 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
4303 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
4304 bmsr
|= BMSR_LSTATUS
;
4306 bmsr
&= ~BMSR_LSTATUS
;
4308 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
4312 if (bmsr
& BMSR_LSTATUS
) {
4313 current_speed
= SPEED_1000
;
4314 current_link_up
= 1;
4315 if (bmcr
& BMCR_FULLDPLX
)
4316 current_duplex
= DUPLEX_FULL
;
4318 current_duplex
= DUPLEX_HALF
;
4323 if (bmcr
& BMCR_ANENABLE
) {
4326 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &local_adv
);
4327 err
|= tg3_readphy(tp
, MII_LPA
, &remote_adv
);
4328 common
= local_adv
& remote_adv
;
4329 if (common
& (ADVERTISE_1000XHALF
|
4330 ADVERTISE_1000XFULL
)) {
4331 if (common
& ADVERTISE_1000XFULL
)
4332 current_duplex
= DUPLEX_FULL
;
4334 current_duplex
= DUPLEX_HALF
;
4335 } else if (!tg3_flag(tp
, 5780_CLASS
)) {
4336 /* Link is up via parallel detect */
4338 current_link_up
= 0;
4343 if (current_link_up
== 1 && current_duplex
== DUPLEX_FULL
)
4344 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
4346 tp
->mac_mode
&= ~MAC_MODE_HALF_DUPLEX
;
4347 if (tp
->link_config
.active_duplex
== DUPLEX_HALF
)
4348 tp
->mac_mode
|= MAC_MODE_HALF_DUPLEX
;
4350 tw32_f(MAC_MODE
, tp
->mac_mode
);
4353 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
4355 tp
->link_config
.active_speed
= current_speed
;
4356 tp
->link_config
.active_duplex
= current_duplex
;
4358 if (current_link_up
!= netif_carrier_ok(tp
->dev
)) {
4359 if (current_link_up
)
4360 netif_carrier_on(tp
->dev
);
4362 netif_carrier_off(tp
->dev
);
4363 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
4365 tg3_link_report(tp
);
4370 static void tg3_serdes_parallel_detect(struct tg3
*tp
)
4372 if (tp
->serdes_counter
) {
4373 /* Give autoneg time to complete. */
4374 tp
->serdes_counter
--;
4378 if (!netif_carrier_ok(tp
->dev
) &&
4379 (tp
->link_config
.autoneg
== AUTONEG_ENABLE
)) {
4382 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
4383 if (bmcr
& BMCR_ANENABLE
) {
4386 /* Select shadow register 0x1f */
4387 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x7c00);
4388 tg3_readphy(tp
, MII_TG3_MISC_SHDW
, &phy1
);
4390 /* Select expansion interrupt status register */
4391 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
4392 MII_TG3_DSP_EXP1_INT_STAT
);
4393 tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &phy2
);
4394 tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &phy2
);
4396 if ((phy1
& 0x10) && !(phy2
& 0x20)) {
4397 /* We have signal detect and not receiving
4398 * config code words, link is up by parallel
4402 bmcr
&= ~BMCR_ANENABLE
;
4403 bmcr
|= BMCR_SPEED1000
| BMCR_FULLDPLX
;
4404 tg3_writephy(tp
, MII_BMCR
, bmcr
);
4405 tp
->phy_flags
|= TG3_PHYFLG_PARALLEL_DETECT
;
4408 } else if (netif_carrier_ok(tp
->dev
) &&
4409 (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) &&
4410 (tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
)) {
4413 /* Select expansion interrupt status register */
4414 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
4415 MII_TG3_DSP_EXP1_INT_STAT
);
4416 tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &phy2
);
4420 /* Config code words received, turn on autoneg. */
4421 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
4422 tg3_writephy(tp
, MII_BMCR
, bmcr
| BMCR_ANENABLE
);
4424 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
4430 static int tg3_setup_phy(struct tg3
*tp
, int force_reset
)
4435 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
4436 err
= tg3_setup_fiber_phy(tp
, force_reset
);
4437 else if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)
4438 err
= tg3_setup_fiber_mii_phy(tp
, force_reset
);
4440 err
= tg3_setup_copper_phy(tp
, force_reset
);
4442 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5784_AX
) {
4445 val
= tr32(TG3_CPMU_CLCK_STAT
) & CPMU_CLCK_STAT_MAC_CLCK_MASK
;
4446 if (val
== CPMU_CLCK_STAT_MAC_CLCK_62_5
)
4448 else if (val
== CPMU_CLCK_STAT_MAC_CLCK_6_25
)
4453 val
= tr32(GRC_MISC_CFG
) & ~GRC_MISC_CFG_PRESCALAR_MASK
;
4454 val
|= (scale
<< GRC_MISC_CFG_PRESCALAR_SHIFT
);
4455 tw32(GRC_MISC_CFG
, val
);
4458 val
= (2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
4459 (6 << TX_LENGTHS_IPG_SHIFT
);
4460 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
)
4461 val
|= tr32(MAC_TX_LENGTHS
) &
4462 (TX_LENGTHS_JMB_FRM_LEN_MSK
|
4463 TX_LENGTHS_CNT_DWN_VAL_MSK
);
4465 if (tp
->link_config
.active_speed
== SPEED_1000
&&
4466 tp
->link_config
.active_duplex
== DUPLEX_HALF
)
4467 tw32(MAC_TX_LENGTHS
, val
|
4468 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT
));
4470 tw32(MAC_TX_LENGTHS
, val
|
4471 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
));
4473 if (!tg3_flag(tp
, 5705_PLUS
)) {
4474 if (netif_carrier_ok(tp
->dev
)) {
4475 tw32(HOSTCC_STAT_COAL_TICKS
,
4476 tp
->coal
.stats_block_coalesce_usecs
);
4478 tw32(HOSTCC_STAT_COAL_TICKS
, 0);
4482 if (tg3_flag(tp
, ASPM_WORKAROUND
)) {
4483 val
= tr32(PCIE_PWR_MGMT_THRESH
);
4484 if (!netif_carrier_ok(tp
->dev
))
4485 val
= (val
& ~PCIE_PWR_MGMT_L1_THRESH_MSK
) |
4488 val
|= PCIE_PWR_MGMT_L1_THRESH_MSK
;
4489 tw32(PCIE_PWR_MGMT_THRESH
, val
);
4495 static inline int tg3_irq_sync(struct tg3
*tp
)
4497 return tp
->irq_sync
;
4500 static inline void tg3_rd32_loop(struct tg3
*tp
, u32
*dst
, u32 off
, u32 len
)
4504 dst
= (u32
*)((u8
*)dst
+ off
);
4505 for (i
= 0; i
< len
; i
+= sizeof(u32
))
4506 *dst
++ = tr32(off
+ i
);
4509 static void tg3_dump_legacy_regs(struct tg3
*tp
, u32
*regs
)
4511 tg3_rd32_loop(tp
, regs
, TG3PCI_VENDOR
, 0xb0);
4512 tg3_rd32_loop(tp
, regs
, MAILBOX_INTERRUPT_0
, 0x200);
4513 tg3_rd32_loop(tp
, regs
, MAC_MODE
, 0x4f0);
4514 tg3_rd32_loop(tp
, regs
, SNDDATAI_MODE
, 0xe0);
4515 tg3_rd32_loop(tp
, regs
, SNDDATAC_MODE
, 0x04);
4516 tg3_rd32_loop(tp
, regs
, SNDBDS_MODE
, 0x80);
4517 tg3_rd32_loop(tp
, regs
, SNDBDI_MODE
, 0x48);
4518 tg3_rd32_loop(tp
, regs
, SNDBDC_MODE
, 0x04);
4519 tg3_rd32_loop(tp
, regs
, RCVLPC_MODE
, 0x20);
4520 tg3_rd32_loop(tp
, regs
, RCVLPC_SELLST_BASE
, 0x15c);
4521 tg3_rd32_loop(tp
, regs
, RCVDBDI_MODE
, 0x0c);
4522 tg3_rd32_loop(tp
, regs
, RCVDBDI_JUMBO_BD
, 0x3c);
4523 tg3_rd32_loop(tp
, regs
, RCVDBDI_BD_PROD_IDX_0
, 0x44);
4524 tg3_rd32_loop(tp
, regs
, RCVDCC_MODE
, 0x04);
4525 tg3_rd32_loop(tp
, regs
, RCVBDI_MODE
, 0x20);
4526 tg3_rd32_loop(tp
, regs
, RCVCC_MODE
, 0x14);
4527 tg3_rd32_loop(tp
, regs
, RCVLSC_MODE
, 0x08);
4528 tg3_rd32_loop(tp
, regs
, MBFREE_MODE
, 0x08);
4529 tg3_rd32_loop(tp
, regs
, HOSTCC_MODE
, 0x100);
4531 if (tg3_flag(tp
, SUPPORT_MSIX
))
4532 tg3_rd32_loop(tp
, regs
, HOSTCC_RXCOL_TICKS_VEC1
, 0x180);
4534 tg3_rd32_loop(tp
, regs
, MEMARB_MODE
, 0x10);
4535 tg3_rd32_loop(tp
, regs
, BUFMGR_MODE
, 0x58);
4536 tg3_rd32_loop(tp
, regs
, RDMAC_MODE
, 0x08);
4537 tg3_rd32_loop(tp
, regs
, WDMAC_MODE
, 0x08);
4538 tg3_rd32_loop(tp
, regs
, RX_CPU_MODE
, 0x04);
4539 tg3_rd32_loop(tp
, regs
, RX_CPU_STATE
, 0x04);
4540 tg3_rd32_loop(tp
, regs
, RX_CPU_PGMCTR
, 0x04);
4541 tg3_rd32_loop(tp
, regs
, RX_CPU_HWBKPT
, 0x04);
4543 if (!tg3_flag(tp
, 5705_PLUS
)) {
4544 tg3_rd32_loop(tp
, regs
, TX_CPU_MODE
, 0x04);
4545 tg3_rd32_loop(tp
, regs
, TX_CPU_STATE
, 0x04);
4546 tg3_rd32_loop(tp
, regs
, TX_CPU_PGMCTR
, 0x04);
4549 tg3_rd32_loop(tp
, regs
, GRCMBOX_INTERRUPT_0
, 0x110);
4550 tg3_rd32_loop(tp
, regs
, FTQ_RESET
, 0x120);
4551 tg3_rd32_loop(tp
, regs
, MSGINT_MODE
, 0x0c);
4552 tg3_rd32_loop(tp
, regs
, DMAC_MODE
, 0x04);
4553 tg3_rd32_loop(tp
, regs
, GRC_MODE
, 0x4c);
4555 if (tg3_flag(tp
, NVRAM
))
4556 tg3_rd32_loop(tp
, regs
, NVRAM_CMD
, 0x24);
4559 static void tg3_dump_state(struct tg3
*tp
)
4564 regs
= kzalloc(TG3_REG_BLK_SIZE
, GFP_ATOMIC
);
4566 netdev_err(tp
->dev
, "Failed allocating register dump buffer\n");
4570 if (tg3_flag(tp
, PCI_EXPRESS
)) {
4571 /* Read up to but not including private PCI registers */
4572 for (i
= 0; i
< TG3_PCIE_TLDLPL_PORT
; i
+= sizeof(u32
))
4573 regs
[i
/ sizeof(u32
)] = tr32(i
);
4575 tg3_dump_legacy_regs(tp
, regs
);
4577 for (i
= 0; i
< TG3_REG_BLK_SIZE
/ sizeof(u32
); i
+= 4) {
4578 if (!regs
[i
+ 0] && !regs
[i
+ 1] &&
4579 !regs
[i
+ 2] && !regs
[i
+ 3])
4582 netdev_err(tp
->dev
, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4584 regs
[i
+ 0], regs
[i
+ 1], regs
[i
+ 2], regs
[i
+ 3]);
4589 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
4590 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
4592 /* SW status block */
4594 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4596 tnapi
->hw_status
->status
,
4597 tnapi
->hw_status
->status_tag
,
4598 tnapi
->hw_status
->rx_jumbo_consumer
,
4599 tnapi
->hw_status
->rx_consumer
,
4600 tnapi
->hw_status
->rx_mini_consumer
,
4601 tnapi
->hw_status
->idx
[0].rx_producer
,
4602 tnapi
->hw_status
->idx
[0].tx_consumer
);
4605 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4607 tnapi
->last_tag
, tnapi
->last_irq_tag
,
4608 tnapi
->tx_prod
, tnapi
->tx_cons
, tnapi
->tx_pending
,
4610 tnapi
->prodring
.rx_std_prod_idx
,
4611 tnapi
->prodring
.rx_std_cons_idx
,
4612 tnapi
->prodring
.rx_jmb_prod_idx
,
4613 tnapi
->prodring
.rx_jmb_cons_idx
);
4617 /* This is called whenever we suspect that the system chipset is re-
4618 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4619 * is bogus tx completions. We try to recover by setting the
4620 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4623 static void tg3_tx_recover(struct tg3
*tp
)
4625 BUG_ON(tg3_flag(tp
, MBOX_WRITE_REORDER
) ||
4626 tp
->write32_tx_mbox
== tg3_write_indirect_mbox
);
4628 netdev_warn(tp
->dev
,
4629 "The system may be re-ordering memory-mapped I/O "
4630 "cycles to the network device, attempting to recover. "
4631 "Please report the problem to the driver maintainer "
4632 "and include system chipset information.\n");
4634 spin_lock(&tp
->lock
);
4635 tg3_flag_set(tp
, TX_RECOVERY_PENDING
);
4636 spin_unlock(&tp
->lock
);
4639 static inline u32
tg3_tx_avail(struct tg3_napi
*tnapi
)
4641 /* Tell compiler to fetch tx indices from memory. */
4643 return tnapi
->tx_pending
-
4644 ((tnapi
->tx_prod
- tnapi
->tx_cons
) & (TG3_TX_RING_SIZE
- 1));
4647 /* Tigon3 never reports partial packet sends. So we do not
4648 * need special logic to handle SKBs that have not had all
4649 * of their frags sent yet, like SunGEM does.
4651 static void tg3_tx(struct tg3_napi
*tnapi
)
4653 struct tg3
*tp
= tnapi
->tp
;
4654 u32 hw_idx
= tnapi
->hw_status
->idx
[0].tx_consumer
;
4655 u32 sw_idx
= tnapi
->tx_cons
;
4656 struct netdev_queue
*txq
;
4657 int index
= tnapi
- tp
->napi
;
4659 if (tg3_flag(tp
, ENABLE_TSS
))
4662 txq
= netdev_get_tx_queue(tp
->dev
, index
);
4664 while (sw_idx
!= hw_idx
) {
4665 struct ring_info
*ri
= &tnapi
->tx_buffers
[sw_idx
];
4666 struct sk_buff
*skb
= ri
->skb
;
4669 if (unlikely(skb
== NULL
)) {
4674 pci_unmap_single(tp
->pdev
,
4675 dma_unmap_addr(ri
, mapping
),
4681 sw_idx
= NEXT_TX(sw_idx
);
4683 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
4684 ri
= &tnapi
->tx_buffers
[sw_idx
];
4685 if (unlikely(ri
->skb
!= NULL
|| sw_idx
== hw_idx
))
4688 pci_unmap_page(tp
->pdev
,
4689 dma_unmap_addr(ri
, mapping
),
4690 skb_shinfo(skb
)->frags
[i
].size
,
4692 sw_idx
= NEXT_TX(sw_idx
);
4697 if (unlikely(tx_bug
)) {
4703 tnapi
->tx_cons
= sw_idx
;
4705 /* Need to make the tx_cons update visible to tg3_start_xmit()
4706 * before checking for netif_queue_stopped(). Without the
4707 * memory barrier, there is a small possibility that tg3_start_xmit()
4708 * will miss it and cause the queue to be stopped forever.
4712 if (unlikely(netif_tx_queue_stopped(txq
) &&
4713 (tg3_tx_avail(tnapi
) > TG3_TX_WAKEUP_THRESH(tnapi
)))) {
4714 __netif_tx_lock(txq
, smp_processor_id());
4715 if (netif_tx_queue_stopped(txq
) &&
4716 (tg3_tx_avail(tnapi
) > TG3_TX_WAKEUP_THRESH(tnapi
)))
4717 netif_tx_wake_queue(txq
);
4718 __netif_tx_unlock(txq
);
4722 static void tg3_rx_skb_free(struct tg3
*tp
, struct ring_info
*ri
, u32 map_sz
)
4727 pci_unmap_single(tp
->pdev
, dma_unmap_addr(ri
, mapping
),
4728 map_sz
, PCI_DMA_FROMDEVICE
);
4729 dev_kfree_skb_any(ri
->skb
);
4733 /* Returns size of skb allocated or < 0 on error.
4735 * We only need to fill in the address because the other members
4736 * of the RX descriptor are invariant, see tg3_init_rings.
4738 * Note the purposeful assymetry of cpu vs. chip accesses. For
4739 * posting buffers we only dirty the first cache line of the RX
4740 * descriptor (containing the address). Whereas for the RX status
4741 * buffers the cpu only reads the last cacheline of the RX descriptor
4742 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4744 static int tg3_alloc_rx_skb(struct tg3
*tp
, struct tg3_rx_prodring_set
*tpr
,
4745 u32 opaque_key
, u32 dest_idx_unmasked
)
4747 struct tg3_rx_buffer_desc
*desc
;
4748 struct ring_info
*map
;
4749 struct sk_buff
*skb
;
4751 int skb_size
, dest_idx
;
4753 switch (opaque_key
) {
4754 case RXD_OPAQUE_RING_STD
:
4755 dest_idx
= dest_idx_unmasked
& tp
->rx_std_ring_mask
;
4756 desc
= &tpr
->rx_std
[dest_idx
];
4757 map
= &tpr
->rx_std_buffers
[dest_idx
];
4758 skb_size
= tp
->rx_pkt_map_sz
;
4761 case RXD_OPAQUE_RING_JUMBO
:
4762 dest_idx
= dest_idx_unmasked
& tp
->rx_jmb_ring_mask
;
4763 desc
= &tpr
->rx_jmb
[dest_idx
].std
;
4764 map
= &tpr
->rx_jmb_buffers
[dest_idx
];
4765 skb_size
= TG3_RX_JMB_MAP_SZ
;
4772 /* Do not overwrite any of the map or rp information
4773 * until we are sure we can commit to a new buffer.
4775 * Callers depend upon this behavior and assume that
4776 * we leave everything unchanged if we fail.
4778 skb
= netdev_alloc_skb(tp
->dev
, skb_size
+ tp
->rx_offset
);
4782 skb_reserve(skb
, tp
->rx_offset
);
4784 mapping
= pci_map_single(tp
->pdev
, skb
->data
, skb_size
,
4785 PCI_DMA_FROMDEVICE
);
4786 if (pci_dma_mapping_error(tp
->pdev
, mapping
)) {
4792 dma_unmap_addr_set(map
, mapping
, mapping
);
4794 desc
->addr_hi
= ((u64
)mapping
>> 32);
4795 desc
->addr_lo
= ((u64
)mapping
& 0xffffffff);
4800 /* We only need to move over in the address because the other
4801 * members of the RX descriptor are invariant. See notes above
4802 * tg3_alloc_rx_skb for full details.
4804 static void tg3_recycle_rx(struct tg3_napi
*tnapi
,
4805 struct tg3_rx_prodring_set
*dpr
,
4806 u32 opaque_key
, int src_idx
,
4807 u32 dest_idx_unmasked
)
4809 struct tg3
*tp
= tnapi
->tp
;
4810 struct tg3_rx_buffer_desc
*src_desc
, *dest_desc
;
4811 struct ring_info
*src_map
, *dest_map
;
4812 struct tg3_rx_prodring_set
*spr
= &tp
->napi
[0].prodring
;
4815 switch (opaque_key
) {
4816 case RXD_OPAQUE_RING_STD
:
4817 dest_idx
= dest_idx_unmasked
& tp
->rx_std_ring_mask
;
4818 dest_desc
= &dpr
->rx_std
[dest_idx
];
4819 dest_map
= &dpr
->rx_std_buffers
[dest_idx
];
4820 src_desc
= &spr
->rx_std
[src_idx
];
4821 src_map
= &spr
->rx_std_buffers
[src_idx
];
4824 case RXD_OPAQUE_RING_JUMBO
:
4825 dest_idx
= dest_idx_unmasked
& tp
->rx_jmb_ring_mask
;
4826 dest_desc
= &dpr
->rx_jmb
[dest_idx
].std
;
4827 dest_map
= &dpr
->rx_jmb_buffers
[dest_idx
];
4828 src_desc
= &spr
->rx_jmb
[src_idx
].std
;
4829 src_map
= &spr
->rx_jmb_buffers
[src_idx
];
4836 dest_map
->skb
= src_map
->skb
;
4837 dma_unmap_addr_set(dest_map
, mapping
,
4838 dma_unmap_addr(src_map
, mapping
));
4839 dest_desc
->addr_hi
= src_desc
->addr_hi
;
4840 dest_desc
->addr_lo
= src_desc
->addr_lo
;
4842 /* Ensure that the update to the skb happens after the physical
4843 * addresses have been transferred to the new BD location.
4847 src_map
->skb
= NULL
;
4850 /* The RX ring scheme is composed of multiple rings which post fresh
4851 * buffers to the chip, and one special ring the chip uses to report
4852 * status back to the host.
4854 * The special ring reports the status of received packets to the
4855 * host. The chip does not write into the original descriptor the
4856 * RX buffer was obtained from. The chip simply takes the original
4857 * descriptor as provided by the host, updates the status and length
4858 * field, then writes this into the next status ring entry.
4860 * Each ring the host uses to post buffers to the chip is described
4861 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
4862 * it is first placed into the on-chip ram. When the packet's length
4863 * is known, it walks down the TG3_BDINFO entries to select the ring.
4864 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4865 * which is within the range of the new packet's length is chosen.
4867 * The "separate ring for rx status" scheme may sound queer, but it makes
4868 * sense from a cache coherency perspective. If only the host writes
4869 * to the buffer post rings, and only the chip writes to the rx status
4870 * rings, then cache lines never move beyond shared-modified state.
4871 * If both the host and chip were to write into the same ring, cache line
4872 * eviction could occur since both entities want it in an exclusive state.
4874 static int tg3_rx(struct tg3_napi
*tnapi
, int budget
)
4876 struct tg3
*tp
= tnapi
->tp
;
4877 u32 work_mask
, rx_std_posted
= 0;
4878 u32 std_prod_idx
, jmb_prod_idx
;
4879 u32 sw_idx
= tnapi
->rx_rcb_ptr
;
4882 struct tg3_rx_prodring_set
*tpr
= &tnapi
->prodring
;
4884 hw_idx
= *(tnapi
->rx_rcb_prod_idx
);
4886 * We need to order the read of hw_idx and the read of
4887 * the opaque cookie.
4892 std_prod_idx
= tpr
->rx_std_prod_idx
;
4893 jmb_prod_idx
= tpr
->rx_jmb_prod_idx
;
4894 while (sw_idx
!= hw_idx
&& budget
> 0) {
4895 struct ring_info
*ri
;
4896 struct tg3_rx_buffer_desc
*desc
= &tnapi
->rx_rcb
[sw_idx
];
4898 struct sk_buff
*skb
;
4899 dma_addr_t dma_addr
;
4900 u32 opaque_key
, desc_idx
, *post_ptr
;
4902 desc_idx
= desc
->opaque
& RXD_OPAQUE_INDEX_MASK
;
4903 opaque_key
= desc
->opaque
& RXD_OPAQUE_RING_MASK
;
4904 if (opaque_key
== RXD_OPAQUE_RING_STD
) {
4905 ri
= &tp
->napi
[0].prodring
.rx_std_buffers
[desc_idx
];
4906 dma_addr
= dma_unmap_addr(ri
, mapping
);
4908 post_ptr
= &std_prod_idx
;
4910 } else if (opaque_key
== RXD_OPAQUE_RING_JUMBO
) {
4911 ri
= &tp
->napi
[0].prodring
.rx_jmb_buffers
[desc_idx
];
4912 dma_addr
= dma_unmap_addr(ri
, mapping
);
4914 post_ptr
= &jmb_prod_idx
;
4916 goto next_pkt_nopost
;
4918 work_mask
|= opaque_key
;
4920 if ((desc
->err_vlan
& RXD_ERR_MASK
) != 0 &&
4921 (desc
->err_vlan
!= RXD_ERR_ODD_NIBBLE_RCVD_MII
)) {
4923 tg3_recycle_rx(tnapi
, tpr
, opaque_key
,
4924 desc_idx
, *post_ptr
);
4926 /* Other statistics kept track of by card. */
4931 len
= ((desc
->idx_len
& RXD_LEN_MASK
) >> RXD_LEN_SHIFT
) -
4934 if (len
> TG3_RX_COPY_THRESH(tp
)) {
4937 skb_size
= tg3_alloc_rx_skb(tp
, tpr
, opaque_key
,
4942 pci_unmap_single(tp
->pdev
, dma_addr
, skb_size
,
4943 PCI_DMA_FROMDEVICE
);
4945 /* Ensure that the update to the skb happens
4946 * after the usage of the old DMA mapping.
4954 struct sk_buff
*copy_skb
;
4956 tg3_recycle_rx(tnapi
, tpr
, opaque_key
,
4957 desc_idx
, *post_ptr
);
4959 copy_skb
= netdev_alloc_skb(tp
->dev
, len
+
4961 if (copy_skb
== NULL
)
4962 goto drop_it_no_recycle
;
4964 skb_reserve(copy_skb
, TG3_RAW_IP_ALIGN
);
4965 skb_put(copy_skb
, len
);
4966 pci_dma_sync_single_for_cpu(tp
->pdev
, dma_addr
, len
, PCI_DMA_FROMDEVICE
);
4967 skb_copy_from_linear_data(skb
, copy_skb
->data
, len
);
4968 pci_dma_sync_single_for_device(tp
->pdev
, dma_addr
, len
, PCI_DMA_FROMDEVICE
);
4970 /* We'll reuse the original ring buffer. */
4974 if ((tp
->dev
->features
& NETIF_F_RXCSUM
) &&
4975 (desc
->type_flags
& RXD_FLAG_TCPUDP_CSUM
) &&
4976 (((desc
->ip_tcp_csum
& RXD_TCPCSUM_MASK
)
4977 >> RXD_TCPCSUM_SHIFT
) == 0xffff))
4978 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
4980 skb_checksum_none_assert(skb
);
4982 skb
->protocol
= eth_type_trans(skb
, tp
->dev
);
4984 if (len
> (tp
->dev
->mtu
+ ETH_HLEN
) &&
4985 skb
->protocol
!= htons(ETH_P_8021Q
)) {
4987 goto drop_it_no_recycle
;
4990 if (desc
->type_flags
& RXD_FLAG_VLAN
&&
4991 !(tp
->rx_mode
& RX_MODE_KEEP_VLAN_TAG
))
4992 __vlan_hwaccel_put_tag(skb
,
4993 desc
->err_vlan
& RXD_VLAN_MASK
);
4995 napi_gro_receive(&tnapi
->napi
, skb
);
5003 if (unlikely(rx_std_posted
>= tp
->rx_std_max_post
)) {
5004 tpr
->rx_std_prod_idx
= std_prod_idx
&
5005 tp
->rx_std_ring_mask
;
5006 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
,
5007 tpr
->rx_std_prod_idx
);
5008 work_mask
&= ~RXD_OPAQUE_RING_STD
;
5013 sw_idx
&= tp
->rx_ret_ring_mask
;
5015 /* Refresh hw_idx to see if there is new work */
5016 if (sw_idx
== hw_idx
) {
5017 hw_idx
= *(tnapi
->rx_rcb_prod_idx
);
5022 /* ACK the status ring. */
5023 tnapi
->rx_rcb_ptr
= sw_idx
;
5024 tw32_rx_mbox(tnapi
->consmbox
, sw_idx
);
5026 /* Refill RX ring(s). */
5027 if (!tg3_flag(tp
, ENABLE_RSS
)) {
5028 if (work_mask
& RXD_OPAQUE_RING_STD
) {
5029 tpr
->rx_std_prod_idx
= std_prod_idx
&
5030 tp
->rx_std_ring_mask
;
5031 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
,
5032 tpr
->rx_std_prod_idx
);
5034 if (work_mask
& RXD_OPAQUE_RING_JUMBO
) {
5035 tpr
->rx_jmb_prod_idx
= jmb_prod_idx
&
5036 tp
->rx_jmb_ring_mask
;
5037 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG
,
5038 tpr
->rx_jmb_prod_idx
);
5041 } else if (work_mask
) {
5042 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5043 * updated before the producer indices can be updated.
5047 tpr
->rx_std_prod_idx
= std_prod_idx
& tp
->rx_std_ring_mask
;
5048 tpr
->rx_jmb_prod_idx
= jmb_prod_idx
& tp
->rx_jmb_ring_mask
;
5050 if (tnapi
!= &tp
->napi
[1])
5051 napi_schedule(&tp
->napi
[1].napi
);
5057 static void tg3_poll_link(struct tg3
*tp
)
5059 /* handle link change and other phy events */
5060 if (!(tg3_flag(tp
, USE_LINKCHG_REG
) || tg3_flag(tp
, POLL_SERDES
))) {
5061 struct tg3_hw_status
*sblk
= tp
->napi
[0].hw_status
;
5063 if (sblk
->status
& SD_STATUS_LINK_CHG
) {
5064 sblk
->status
= SD_STATUS_UPDATED
|
5065 (sblk
->status
& ~SD_STATUS_LINK_CHG
);
5066 spin_lock(&tp
->lock
);
5067 if (tg3_flag(tp
, USE_PHYLIB
)) {
5069 (MAC_STATUS_SYNC_CHANGED
|
5070 MAC_STATUS_CFG_CHANGED
|
5071 MAC_STATUS_MI_COMPLETION
|
5072 MAC_STATUS_LNKSTATE_CHANGED
));
5075 tg3_setup_phy(tp
, 0);
5076 spin_unlock(&tp
->lock
);
5081 static int tg3_rx_prodring_xfer(struct tg3
*tp
,
5082 struct tg3_rx_prodring_set
*dpr
,
5083 struct tg3_rx_prodring_set
*spr
)
5085 u32 si
, di
, cpycnt
, src_prod_idx
;
5089 src_prod_idx
= spr
->rx_std_prod_idx
;
5091 /* Make sure updates to the rx_std_buffers[] entries and the
5092 * standard producer index are seen in the correct order.
5096 if (spr
->rx_std_cons_idx
== src_prod_idx
)
5099 if (spr
->rx_std_cons_idx
< src_prod_idx
)
5100 cpycnt
= src_prod_idx
- spr
->rx_std_cons_idx
;
5102 cpycnt
= tp
->rx_std_ring_mask
+ 1 -
5103 spr
->rx_std_cons_idx
;
5105 cpycnt
= min(cpycnt
,
5106 tp
->rx_std_ring_mask
+ 1 - dpr
->rx_std_prod_idx
);
5108 si
= spr
->rx_std_cons_idx
;
5109 di
= dpr
->rx_std_prod_idx
;
5111 for (i
= di
; i
< di
+ cpycnt
; i
++) {
5112 if (dpr
->rx_std_buffers
[i
].skb
) {
5122 /* Ensure that updates to the rx_std_buffers ring and the
5123 * shadowed hardware producer ring from tg3_recycle_skb() are
5124 * ordered correctly WRT the skb check above.
5128 memcpy(&dpr
->rx_std_buffers
[di
],
5129 &spr
->rx_std_buffers
[si
],
5130 cpycnt
* sizeof(struct ring_info
));
5132 for (i
= 0; i
< cpycnt
; i
++, di
++, si
++) {
5133 struct tg3_rx_buffer_desc
*sbd
, *dbd
;
5134 sbd
= &spr
->rx_std
[si
];
5135 dbd
= &dpr
->rx_std
[di
];
5136 dbd
->addr_hi
= sbd
->addr_hi
;
5137 dbd
->addr_lo
= sbd
->addr_lo
;
5140 spr
->rx_std_cons_idx
= (spr
->rx_std_cons_idx
+ cpycnt
) &
5141 tp
->rx_std_ring_mask
;
5142 dpr
->rx_std_prod_idx
= (dpr
->rx_std_prod_idx
+ cpycnt
) &
5143 tp
->rx_std_ring_mask
;
5147 src_prod_idx
= spr
->rx_jmb_prod_idx
;
5149 /* Make sure updates to the rx_jmb_buffers[] entries and
5150 * the jumbo producer index are seen in the correct order.
5154 if (spr
->rx_jmb_cons_idx
== src_prod_idx
)
5157 if (spr
->rx_jmb_cons_idx
< src_prod_idx
)
5158 cpycnt
= src_prod_idx
- spr
->rx_jmb_cons_idx
;
5160 cpycnt
= tp
->rx_jmb_ring_mask
+ 1 -
5161 spr
->rx_jmb_cons_idx
;
5163 cpycnt
= min(cpycnt
,
5164 tp
->rx_jmb_ring_mask
+ 1 - dpr
->rx_jmb_prod_idx
);
5166 si
= spr
->rx_jmb_cons_idx
;
5167 di
= dpr
->rx_jmb_prod_idx
;
5169 for (i
= di
; i
< di
+ cpycnt
; i
++) {
5170 if (dpr
->rx_jmb_buffers
[i
].skb
) {
5180 /* Ensure that updates to the rx_jmb_buffers ring and the
5181 * shadowed hardware producer ring from tg3_recycle_skb() are
5182 * ordered correctly WRT the skb check above.
5186 memcpy(&dpr
->rx_jmb_buffers
[di
],
5187 &spr
->rx_jmb_buffers
[si
],
5188 cpycnt
* sizeof(struct ring_info
));
5190 for (i
= 0; i
< cpycnt
; i
++, di
++, si
++) {
5191 struct tg3_rx_buffer_desc
*sbd
, *dbd
;
5192 sbd
= &spr
->rx_jmb
[si
].std
;
5193 dbd
= &dpr
->rx_jmb
[di
].std
;
5194 dbd
->addr_hi
= sbd
->addr_hi
;
5195 dbd
->addr_lo
= sbd
->addr_lo
;
5198 spr
->rx_jmb_cons_idx
= (spr
->rx_jmb_cons_idx
+ cpycnt
) &
5199 tp
->rx_jmb_ring_mask
;
5200 dpr
->rx_jmb_prod_idx
= (dpr
->rx_jmb_prod_idx
+ cpycnt
) &
5201 tp
->rx_jmb_ring_mask
;
5207 static int tg3_poll_work(struct tg3_napi
*tnapi
, int work_done
, int budget
)
5209 struct tg3
*tp
= tnapi
->tp
;
5211 /* run TX completion thread */
5212 if (tnapi
->hw_status
->idx
[0].tx_consumer
!= tnapi
->tx_cons
) {
5214 if (unlikely(tg3_flag(tp
, TX_RECOVERY_PENDING
)))
5218 /* run RX thread, within the bounds set by NAPI.
5219 * All RX "locking" is done by ensuring outside
5220 * code synchronizes with tg3->napi.poll()
5222 if (*(tnapi
->rx_rcb_prod_idx
) != tnapi
->rx_rcb_ptr
)
5223 work_done
+= tg3_rx(tnapi
, budget
- work_done
);
5225 if (tg3_flag(tp
, ENABLE_RSS
) && tnapi
== &tp
->napi
[1]) {
5226 struct tg3_rx_prodring_set
*dpr
= &tp
->napi
[0].prodring
;
5228 u32 std_prod_idx
= dpr
->rx_std_prod_idx
;
5229 u32 jmb_prod_idx
= dpr
->rx_jmb_prod_idx
;
5231 for (i
= 1; i
< tp
->irq_cnt
; i
++)
5232 err
|= tg3_rx_prodring_xfer(tp
, dpr
,
5233 &tp
->napi
[i
].prodring
);
5237 if (std_prod_idx
!= dpr
->rx_std_prod_idx
)
5238 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
,
5239 dpr
->rx_std_prod_idx
);
5241 if (jmb_prod_idx
!= dpr
->rx_jmb_prod_idx
)
5242 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG
,
5243 dpr
->rx_jmb_prod_idx
);
5248 tw32_f(HOSTCC_MODE
, tp
->coal_now
);
5254 static int tg3_poll_msix(struct napi_struct
*napi
, int budget
)
5256 struct tg3_napi
*tnapi
= container_of(napi
, struct tg3_napi
, napi
);
5257 struct tg3
*tp
= tnapi
->tp
;
5259 struct tg3_hw_status
*sblk
= tnapi
->hw_status
;
5262 work_done
= tg3_poll_work(tnapi
, work_done
, budget
);
5264 if (unlikely(tg3_flag(tp
, TX_RECOVERY_PENDING
)))
5267 if (unlikely(work_done
>= budget
))
5270 /* tp->last_tag is used in tg3_int_reenable() below
5271 * to tell the hw how much work has been processed,
5272 * so we must read it before checking for more work.
5274 tnapi
->last_tag
= sblk
->status_tag
;
5275 tnapi
->last_irq_tag
= tnapi
->last_tag
;
5278 /* check for RX/TX work to do */
5279 if (likely(sblk
->idx
[0].tx_consumer
== tnapi
->tx_cons
&&
5280 *(tnapi
->rx_rcb_prod_idx
) == tnapi
->rx_rcb_ptr
)) {
5281 napi_complete(napi
);
5282 /* Reenable interrupts. */
5283 tw32_mailbox(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
5292 /* work_done is guaranteed to be less than budget. */
5293 napi_complete(napi
);
5294 schedule_work(&tp
->reset_task
);
5298 static void tg3_process_error(struct tg3
*tp
)
5301 bool real_error
= false;
5303 if (tg3_flag(tp
, ERROR_PROCESSED
))
5306 /* Check Flow Attention register */
5307 val
= tr32(HOSTCC_FLOW_ATTN
);
5308 if (val
& ~HOSTCC_FLOW_ATTN_MBUF_LWM
) {
5309 netdev_err(tp
->dev
, "FLOW Attention error. Resetting chip.\n");
5313 if (tr32(MSGINT_STATUS
) & ~MSGINT_STATUS_MSI_REQ
) {
5314 netdev_err(tp
->dev
, "MSI Status error. Resetting chip.\n");
5318 if (tr32(RDMAC_STATUS
) || tr32(WDMAC_STATUS
)) {
5319 netdev_err(tp
->dev
, "DMA Status error. Resetting chip.\n");
5328 tg3_flag_set(tp
, ERROR_PROCESSED
);
5329 schedule_work(&tp
->reset_task
);
5332 static int tg3_poll(struct napi_struct
*napi
, int budget
)
5334 struct tg3_napi
*tnapi
= container_of(napi
, struct tg3_napi
, napi
);
5335 struct tg3
*tp
= tnapi
->tp
;
5337 struct tg3_hw_status
*sblk
= tnapi
->hw_status
;
5340 if (sblk
->status
& SD_STATUS_ERROR
)
5341 tg3_process_error(tp
);
5345 work_done
= tg3_poll_work(tnapi
, work_done
, budget
);
5347 if (unlikely(tg3_flag(tp
, TX_RECOVERY_PENDING
)))
5350 if (unlikely(work_done
>= budget
))
5353 if (tg3_flag(tp
, TAGGED_STATUS
)) {
5354 /* tp->last_tag is used in tg3_int_reenable() below
5355 * to tell the hw how much work has been processed,
5356 * so we must read it before checking for more work.
5358 tnapi
->last_tag
= sblk
->status_tag
;
5359 tnapi
->last_irq_tag
= tnapi
->last_tag
;
5362 sblk
->status
&= ~SD_STATUS_UPDATED
;
5364 if (likely(!tg3_has_work(tnapi
))) {
5365 napi_complete(napi
);
5366 tg3_int_reenable(tnapi
);
5374 /* work_done is guaranteed to be less than budget. */
5375 napi_complete(napi
);
5376 schedule_work(&tp
->reset_task
);
5380 static void tg3_napi_disable(struct tg3
*tp
)
5384 for (i
= tp
->irq_cnt
- 1; i
>= 0; i
--)
5385 napi_disable(&tp
->napi
[i
].napi
);
5388 static void tg3_napi_enable(struct tg3
*tp
)
5392 for (i
= 0; i
< tp
->irq_cnt
; i
++)
5393 napi_enable(&tp
->napi
[i
].napi
);
5396 static void tg3_napi_init(struct tg3
*tp
)
5400 netif_napi_add(tp
->dev
, &tp
->napi
[0].napi
, tg3_poll
, 64);
5401 for (i
= 1; i
< tp
->irq_cnt
; i
++)
5402 netif_napi_add(tp
->dev
, &tp
->napi
[i
].napi
, tg3_poll_msix
, 64);
5405 static void tg3_napi_fini(struct tg3
*tp
)
5409 for (i
= 0; i
< tp
->irq_cnt
; i
++)
5410 netif_napi_del(&tp
->napi
[i
].napi
);
5413 static inline void tg3_netif_stop(struct tg3
*tp
)
5415 tp
->dev
->trans_start
= jiffies
; /* prevent tx timeout */
5416 tg3_napi_disable(tp
);
5417 netif_tx_disable(tp
->dev
);
5420 static inline void tg3_netif_start(struct tg3
*tp
)
5422 /* NOTE: unconditional netif_tx_wake_all_queues is only
5423 * appropriate so long as all callers are assured to
5424 * have free tx slots (such as after tg3_init_hw)
5426 netif_tx_wake_all_queues(tp
->dev
);
5428 tg3_napi_enable(tp
);
5429 tp
->napi
[0].hw_status
->status
|= SD_STATUS_UPDATED
;
5430 tg3_enable_ints(tp
);
5433 static void tg3_irq_quiesce(struct tg3
*tp
)
5437 BUG_ON(tp
->irq_sync
);
5442 for (i
= 0; i
< tp
->irq_cnt
; i
++)
5443 synchronize_irq(tp
->napi
[i
].irq_vec
);
5446 /* Fully shutdown all tg3 driver activity elsewhere in the system.
5447 * If irq_sync is non-zero, then the IRQ handler must be synchronized
5448 * with as well. Most of the time, this is not necessary except when
5449 * shutting down the device.
5451 static inline void tg3_full_lock(struct tg3
*tp
, int irq_sync
)
5453 spin_lock_bh(&tp
->lock
);
5455 tg3_irq_quiesce(tp
);
5458 static inline void tg3_full_unlock(struct tg3
*tp
)
5460 spin_unlock_bh(&tp
->lock
);
5463 /* One-shot MSI handler - Chip automatically disables interrupt
5464 * after sending MSI so driver doesn't have to do it.
5466 static irqreturn_t
tg3_msi_1shot(int irq
, void *dev_id
)
5468 struct tg3_napi
*tnapi
= dev_id
;
5469 struct tg3
*tp
= tnapi
->tp
;
5471 prefetch(tnapi
->hw_status
);
5473 prefetch(&tnapi
->rx_rcb
[tnapi
->rx_rcb_ptr
]);
5475 if (likely(!tg3_irq_sync(tp
)))
5476 napi_schedule(&tnapi
->napi
);
5481 /* MSI ISR - No need to check for interrupt sharing and no need to
5482 * flush status block and interrupt mailbox. PCI ordering rules
5483 * guarantee that MSI will arrive after the status block.
5485 static irqreturn_t
tg3_msi(int irq
, void *dev_id
)
5487 struct tg3_napi
*tnapi
= dev_id
;
5488 struct tg3
*tp
= tnapi
->tp
;
5490 prefetch(tnapi
->hw_status
);
5492 prefetch(&tnapi
->rx_rcb
[tnapi
->rx_rcb_ptr
]);
5494 * Writing any value to intr-mbox-0 clears PCI INTA# and
5495 * chip-internal interrupt pending events.
5496 * Writing non-zero to intr-mbox-0 additional tells the
5497 * NIC to stop sending us irqs, engaging "in-intr-handler"
5500 tw32_mailbox(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
, 0x00000001);
5501 if (likely(!tg3_irq_sync(tp
)))
5502 napi_schedule(&tnapi
->napi
);
5504 return IRQ_RETVAL(1);
5507 static irqreturn_t
tg3_interrupt(int irq
, void *dev_id
)
5509 struct tg3_napi
*tnapi
= dev_id
;
5510 struct tg3
*tp
= tnapi
->tp
;
5511 struct tg3_hw_status
*sblk
= tnapi
->hw_status
;
5512 unsigned int handled
= 1;
5514 /* In INTx mode, it is possible for the interrupt to arrive at
5515 * the CPU before the status block posted prior to the interrupt.
5516 * Reading the PCI State register will confirm whether the
5517 * interrupt is ours and will flush the status block.
5519 if (unlikely(!(sblk
->status
& SD_STATUS_UPDATED
))) {
5520 if (tg3_flag(tp
, CHIP_RESETTING
) ||
5521 (tr32(TG3PCI_PCISTATE
) & PCISTATE_INT_NOT_ACTIVE
)) {
5528 * Writing any value to intr-mbox-0 clears PCI INTA# and
5529 * chip-internal interrupt pending events.
5530 * Writing non-zero to intr-mbox-0 additional tells the
5531 * NIC to stop sending us irqs, engaging "in-intr-handler"
5534 * Flush the mailbox to de-assert the IRQ immediately to prevent
5535 * spurious interrupts. The flush impacts performance but
5536 * excessive spurious interrupts can be worse in some cases.
5538 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
, 0x00000001);
5539 if (tg3_irq_sync(tp
))
5541 sblk
->status
&= ~SD_STATUS_UPDATED
;
5542 if (likely(tg3_has_work(tnapi
))) {
5543 prefetch(&tnapi
->rx_rcb
[tnapi
->rx_rcb_ptr
]);
5544 napi_schedule(&tnapi
->napi
);
5546 /* No work, shared interrupt perhaps? re-enable
5547 * interrupts, and flush that PCI write
5549 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
5553 return IRQ_RETVAL(handled
);
5556 static irqreturn_t
tg3_interrupt_tagged(int irq
, void *dev_id
)
5558 struct tg3_napi
*tnapi
= dev_id
;
5559 struct tg3
*tp
= tnapi
->tp
;
5560 struct tg3_hw_status
*sblk
= tnapi
->hw_status
;
5561 unsigned int handled
= 1;
5563 /* In INTx mode, it is possible for the interrupt to arrive at
5564 * the CPU before the status block posted prior to the interrupt.
5565 * Reading the PCI State register will confirm whether the
5566 * interrupt is ours and will flush the status block.
5568 if (unlikely(sblk
->status_tag
== tnapi
->last_irq_tag
)) {
5569 if (tg3_flag(tp
, CHIP_RESETTING
) ||
5570 (tr32(TG3PCI_PCISTATE
) & PCISTATE_INT_NOT_ACTIVE
)) {
5577 * writing any value to intr-mbox-0 clears PCI INTA# and
5578 * chip-internal interrupt pending events.
5579 * writing non-zero to intr-mbox-0 additional tells the
5580 * NIC to stop sending us irqs, engaging "in-intr-handler"
5583 * Flush the mailbox to de-assert the IRQ immediately to prevent
5584 * spurious interrupts. The flush impacts performance but
5585 * excessive spurious interrupts can be worse in some cases.
5587 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
, 0x00000001);
5590 * In a shared interrupt configuration, sometimes other devices'
5591 * interrupts will scream. We record the current status tag here
5592 * so that the above check can report that the screaming interrupts
5593 * are unhandled. Eventually they will be silenced.
5595 tnapi
->last_irq_tag
= sblk
->status_tag
;
5597 if (tg3_irq_sync(tp
))
5600 prefetch(&tnapi
->rx_rcb
[tnapi
->rx_rcb_ptr
]);
5602 napi_schedule(&tnapi
->napi
);
5605 return IRQ_RETVAL(handled
);
5608 /* ISR for interrupt test */
5609 static irqreturn_t
tg3_test_isr(int irq
, void *dev_id
)
5611 struct tg3_napi
*tnapi
= dev_id
;
5612 struct tg3
*tp
= tnapi
->tp
;
5613 struct tg3_hw_status
*sblk
= tnapi
->hw_status
;
5615 if ((sblk
->status
& SD_STATUS_UPDATED
) ||
5616 !(tr32(TG3PCI_PCISTATE
) & PCISTATE_INT_NOT_ACTIVE
)) {
5617 tg3_disable_ints(tp
);
5618 return IRQ_RETVAL(1);
5620 return IRQ_RETVAL(0);
5623 static int tg3_init_hw(struct tg3
*, int);
5624 static int tg3_halt(struct tg3
*, int, int);
5626 /* Restart hardware after configuration changes, self-test, etc.
5627 * Invoked with tp->lock held.
5629 static int tg3_restart_hw(struct tg3
*tp
, int reset_phy
)
5630 __releases(tp
->lock
)
5631 __acquires(tp
->lock
)
5635 err
= tg3_init_hw(tp
, reset_phy
);
5638 "Failed to re-initialize device, aborting\n");
5639 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
5640 tg3_full_unlock(tp
);
5641 del_timer_sync(&tp
->timer
);
5643 tg3_napi_enable(tp
);
5645 tg3_full_lock(tp
, 0);
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll entry point: synthesize an interrupt on every vector. */
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
5661 static void tg3_reset_task(struct work_struct
*work
)
5663 struct tg3
*tp
= container_of(work
, struct tg3
, reset_task
);
5665 unsigned int restart_timer
;
5667 tg3_full_lock(tp
, 0);
5669 if (!netif_running(tp
->dev
)) {
5670 tg3_full_unlock(tp
);
5674 tg3_full_unlock(tp
);
5680 tg3_full_lock(tp
, 1);
5682 restart_timer
= tg3_flag(tp
, RESTART_TIMER
);
5683 tg3_flag_clear(tp
, RESTART_TIMER
);
5685 if (tg3_flag(tp
, TX_RECOVERY_PENDING
)) {
5686 tp
->write32_tx_mbox
= tg3_write32_tx_mbox
;
5687 tp
->write32_rx_mbox
= tg3_write_flush_reg32
;
5688 tg3_flag_set(tp
, MBOX_WRITE_REORDER
);
5689 tg3_flag_clear(tp
, TX_RECOVERY_PENDING
);
5692 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 0);
5693 err
= tg3_init_hw(tp
, 1);
5697 tg3_netif_start(tp
);
5700 mod_timer(&tp
->timer
, jiffies
+ 1);
5703 tg3_full_unlock(tp
);
5709 static void tg3_tx_timeout(struct net_device
*dev
)
5711 struct tg3
*tp
= netdev_priv(dev
);
5713 if (netif_msg_tx_err(tp
)) {
5714 netdev_err(dev
, "transmit timed out, resetting\n");
5718 schedule_work(&tp
->reset_task
);
5721 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5722 static inline int tg3_4g_overflow_test(dma_addr_t mapping
, int len
)
5724 u32 base
= (u32
) mapping
& 0xffffffff;
5726 return (base
> 0xffffdcc0) && (base
+ len
+ 8 < base
);
5729 /* Test for DMA addresses > 40-bit */
5730 static inline int tg3_40bit_overflow_test(struct tg3
*tp
, dma_addr_t mapping
,
5733 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5734 if (tg3_flag(tp
, 40BIT_DMA_BUG
))
5735 return ((u64
) mapping
+ len
) > DMA_BIT_MASK(40);
5742 static void tg3_set_txd(struct tg3_napi
*tnapi
, int entry
,
5743 dma_addr_t mapping
, int len
, u32 flags
,
5746 struct tg3_tx_buffer_desc
*txd
= &tnapi
->tx_ring
[entry
];
5747 int is_end
= (mss_and_is_end
& 0x1);
5748 u32 mss
= (mss_and_is_end
>> 1);
5752 flags
|= TXD_FLAG_END
;
5753 if (flags
& TXD_FLAG_VLAN
) {
5754 vlan_tag
= flags
>> 16;
5757 vlan_tag
|= (mss
<< TXD_MSS_SHIFT
);
5759 txd
->addr_hi
= ((u64
) mapping
>> 32);
5760 txd
->addr_lo
= ((u64
) mapping
& 0xffffffff);
5761 txd
->len_flags
= (len
<< TXD_LEN_SHIFT
) | flags
;
5762 txd
->vlan_tag
= vlan_tag
<< TXD_VLAN_TAG_SHIFT
;
5765 static void tg3_skb_error_unmap(struct tg3_napi
*tnapi
,
5766 struct sk_buff
*skb
, int last
)
5769 u32 entry
= tnapi
->tx_prod
;
5770 struct ring_info
*txb
= &tnapi
->tx_buffers
[entry
];
5772 pci_unmap_single(tnapi
->tp
->pdev
,
5773 dma_unmap_addr(txb
, mapping
),
5776 for (i
= 0; i
<= last
; i
++) {
5777 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
5779 entry
= NEXT_TX(entry
);
5780 txb
= &tnapi
->tx_buffers
[entry
];
5782 pci_unmap_page(tnapi
->tp
->pdev
,
5783 dma_unmap_addr(txb
, mapping
),
5784 frag
->size
, PCI_DMA_TODEVICE
);
5788 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5789 static int tigon3_dma_hwbug_workaround(struct tg3_napi
*tnapi
,
5790 struct sk_buff
*skb
,
5791 u32 base_flags
, u32 mss
)
5793 struct tg3
*tp
= tnapi
->tp
;
5794 struct sk_buff
*new_skb
;
5795 dma_addr_t new_addr
= 0;
5796 u32 entry
= tnapi
->tx_prod
;
5799 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
)
5800 new_skb
= skb_copy(skb
, GFP_ATOMIC
);
5802 int more_headroom
= 4 - ((unsigned long)skb
->data
& 3);
5804 new_skb
= skb_copy_expand(skb
,
5805 skb_headroom(skb
) + more_headroom
,
5806 skb_tailroom(skb
), GFP_ATOMIC
);
5812 /* New SKB is guaranteed to be linear. */
5813 new_addr
= pci_map_single(tp
->pdev
, new_skb
->data
, new_skb
->len
,
5815 /* Make sure the mapping succeeded */
5816 if (pci_dma_mapping_error(tp
->pdev
, new_addr
)) {
5818 dev_kfree_skb(new_skb
);
5820 /* Make sure new skb does not cross any 4G boundaries.
5821 * Drop the packet if it does.
5823 } else if (tg3_flag(tp
, 4G_DMA_BNDRY_BUG
) &&
5824 tg3_4g_overflow_test(new_addr
, new_skb
->len
)) {
5825 pci_unmap_single(tp
->pdev
, new_addr
, new_skb
->len
,
5828 dev_kfree_skb(new_skb
);
5830 tnapi
->tx_buffers
[entry
].skb
= new_skb
;
5831 dma_unmap_addr_set(&tnapi
->tx_buffers
[entry
],
5834 tg3_set_txd(tnapi
, entry
, new_addr
, new_skb
->len
,
5835 base_flags
, 1 | (mss
<< 1));
5844 static netdev_tx_t
tg3_start_xmit(struct sk_buff
*, struct net_device
*);
5846 /* Use GSO to workaround a rare TSO bug that may be triggered when the
5847 * TSO header is greater than 80 bytes.
5849 static int tg3_tso_bug(struct tg3
*tp
, struct sk_buff
*skb
)
5851 struct sk_buff
*segs
, *nskb
;
5852 u32 frag_cnt_est
= skb_shinfo(skb
)->gso_segs
* 3;
5854 /* Estimate the number of fragments in the worst case */
5855 if (unlikely(tg3_tx_avail(&tp
->napi
[0]) <= frag_cnt_est
)) {
5856 netif_stop_queue(tp
->dev
);
5858 /* netif_tx_stop_queue() must be done before checking
5859 * checking tx index in tg3_tx_avail() below, because in
5860 * tg3_tx(), we update tx index before checking for
5861 * netif_tx_queue_stopped().
5864 if (tg3_tx_avail(&tp
->napi
[0]) <= frag_cnt_est
)
5865 return NETDEV_TX_BUSY
;
5867 netif_wake_queue(tp
->dev
);
5870 segs
= skb_gso_segment(skb
, tp
->dev
->features
& ~NETIF_F_TSO
);
5872 goto tg3_tso_bug_end
;
5878 tg3_start_xmit(nskb
, tp
->dev
);
5884 return NETDEV_TX_OK
;
5887 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5888 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
5890 static netdev_tx_t
tg3_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
5892 struct tg3
*tp
= netdev_priv(dev
);
5893 u32 len
, entry
, base_flags
, mss
;
5894 int i
= -1, would_hit_hwbug
;
5896 struct tg3_napi
*tnapi
;
5897 struct netdev_queue
*txq
;
5900 txq
= netdev_get_tx_queue(dev
, skb_get_queue_mapping(skb
));
5901 tnapi
= &tp
->napi
[skb_get_queue_mapping(skb
)];
5902 if (tg3_flag(tp
, ENABLE_TSS
))
5905 /* We are running in BH disabled context with netif_tx_lock
5906 * and TX reclaim runs via tp->napi.poll inside of a software
5907 * interrupt. Furthermore, IRQ processing runs lockless so we have
5908 * no IRQ context deadlocks to worry about either. Rejoice!
5910 if (unlikely(tg3_tx_avail(tnapi
) <= (skb_shinfo(skb
)->nr_frags
+ 1))) {
5911 if (!netif_tx_queue_stopped(txq
)) {
5912 netif_tx_stop_queue(txq
);
5914 /* This is a hard error, log it. */
5916 "BUG! Tx Ring full when queue awake!\n");
5918 return NETDEV_TX_BUSY
;
5921 entry
= tnapi
->tx_prod
;
5923 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
5924 base_flags
|= TXD_FLAG_TCPUDP_CSUM
;
5926 mss
= skb_shinfo(skb
)->gso_size
;
5929 u32 tcp_opt_len
, hdr_len
;
5931 if (skb_header_cloned(skb
) &&
5932 pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
)) {
5938 tcp_opt_len
= tcp_optlen(skb
);
5940 if (skb_is_gso_v6(skb
)) {
5941 hdr_len
= skb_headlen(skb
) - ETH_HLEN
;
5945 ip_tcp_len
= ip_hdrlen(skb
) + sizeof(struct tcphdr
);
5946 hdr_len
= ip_tcp_len
+ tcp_opt_len
;
5949 iph
->tot_len
= htons(mss
+ hdr_len
);
5952 if (unlikely((ETH_HLEN
+ hdr_len
) > 80) &&
5953 tg3_flag(tp
, TSO_BUG
))
5954 return tg3_tso_bug(tp
, skb
);
5956 base_flags
|= (TXD_FLAG_CPU_PRE_DMA
|
5957 TXD_FLAG_CPU_POST_DMA
);
5959 if (tg3_flag(tp
, HW_TSO_1
) ||
5960 tg3_flag(tp
, HW_TSO_2
) ||
5961 tg3_flag(tp
, HW_TSO_3
)) {
5962 tcp_hdr(skb
)->check
= 0;
5963 base_flags
&= ~TXD_FLAG_TCPUDP_CSUM
;
5965 tcp_hdr(skb
)->check
= ~csum_tcpudp_magic(iph
->saddr
,
5970 if (tg3_flag(tp
, HW_TSO_3
)) {
5971 mss
|= (hdr_len
& 0xc) << 12;
5973 base_flags
|= 0x00000010;
5974 base_flags
|= (hdr_len
& 0x3e0) << 5;
5975 } else if (tg3_flag(tp
, HW_TSO_2
))
5976 mss
|= hdr_len
<< 9;
5977 else if (tg3_flag(tp
, HW_TSO_1
) ||
5978 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
5979 if (tcp_opt_len
|| iph
->ihl
> 5) {
5982 tsflags
= (iph
->ihl
- 5) + (tcp_opt_len
>> 2);
5983 mss
|= (tsflags
<< 11);
5986 if (tcp_opt_len
|| iph
->ihl
> 5) {
5989 tsflags
= (iph
->ihl
- 5) + (tcp_opt_len
>> 2);
5990 base_flags
|= tsflags
<< 12;
5995 if (vlan_tx_tag_present(skb
))
5996 base_flags
|= (TXD_FLAG_VLAN
|
5997 (vlan_tx_tag_get(skb
) << 16));
5999 if (tg3_flag(tp
, USE_JUMBO_BDFLAG
) &&
6000 !mss
&& skb
->len
> VLAN_ETH_FRAME_LEN
)
6001 base_flags
|= TXD_FLAG_JMB_PKT
;
6003 len
= skb_headlen(skb
);
6005 mapping
= pci_map_single(tp
->pdev
, skb
->data
, len
, PCI_DMA_TODEVICE
);
6006 if (pci_dma_mapping_error(tp
->pdev
, mapping
)) {
6011 tnapi
->tx_buffers
[entry
].skb
= skb
;
6012 dma_unmap_addr_set(&tnapi
->tx_buffers
[entry
], mapping
, mapping
);
6014 would_hit_hwbug
= 0;
6016 if (tg3_flag(tp
, SHORT_DMA_BUG
) && len
<= 8)
6017 would_hit_hwbug
= 1;
6019 if (tg3_flag(tp
, 4G_DMA_BNDRY_BUG
) &&
6020 tg3_4g_overflow_test(mapping
, len
))
6021 would_hit_hwbug
= 1;
6023 if (tg3_flag(tp
, 40BIT_DMA_LIMIT_BUG
) &&
6024 tg3_40bit_overflow_test(tp
, mapping
, len
))
6025 would_hit_hwbug
= 1;
6027 if (tg3_flag(tp
, 5701_DMA_BUG
))
6028 would_hit_hwbug
= 1;
6030 tg3_set_txd(tnapi
, entry
, mapping
, len
, base_flags
,
6031 (skb_shinfo(skb
)->nr_frags
== 0) | (mss
<< 1));
6033 entry
= NEXT_TX(entry
);
6035 /* Now loop through additional data fragments, and queue them. */
6036 if (skb_shinfo(skb
)->nr_frags
> 0) {
6037 last
= skb_shinfo(skb
)->nr_frags
- 1;
6038 for (i
= 0; i
<= last
; i
++) {
6039 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
6042 mapping
= pci_map_page(tp
->pdev
,
6045 len
, PCI_DMA_TODEVICE
);
6047 tnapi
->tx_buffers
[entry
].skb
= NULL
;
6048 dma_unmap_addr_set(&tnapi
->tx_buffers
[entry
], mapping
,
6050 if (pci_dma_mapping_error(tp
->pdev
, mapping
))
6053 if (tg3_flag(tp
, SHORT_DMA_BUG
) &&
6055 would_hit_hwbug
= 1;
6057 if (tg3_flag(tp
, 4G_DMA_BNDRY_BUG
) &&
6058 tg3_4g_overflow_test(mapping
, len
))
6059 would_hit_hwbug
= 1;
6061 if (tg3_flag(tp
, 40BIT_DMA_LIMIT_BUG
) &&
6062 tg3_40bit_overflow_test(tp
, mapping
, len
))
6063 would_hit_hwbug
= 1;
6065 if (tg3_flag(tp
, HW_TSO_1
) ||
6066 tg3_flag(tp
, HW_TSO_2
) ||
6067 tg3_flag(tp
, HW_TSO_3
))
6068 tg3_set_txd(tnapi
, entry
, mapping
, len
,
6069 base_flags
, (i
== last
)|(mss
<< 1));
6071 tg3_set_txd(tnapi
, entry
, mapping
, len
,
6072 base_flags
, (i
== last
));
6074 entry
= NEXT_TX(entry
);
6078 if (would_hit_hwbug
) {
6079 tg3_skb_error_unmap(tnapi
, skb
, i
);
6081 /* If the workaround fails due to memory/mapping
6082 * failure, silently drop this packet.
6084 if (tigon3_dma_hwbug_workaround(tnapi
, skb
, base_flags
, mss
))
6087 entry
= NEXT_TX(tnapi
->tx_prod
);
6090 /* Packets are ready, update Tx producer idx local and on card. */
6091 tw32_tx_mbox(tnapi
->prodmbox
, entry
);
6093 tnapi
->tx_prod
= entry
;
6094 if (unlikely(tg3_tx_avail(tnapi
) <= (MAX_SKB_FRAGS
+ 1))) {
6095 netif_tx_stop_queue(txq
);
6097 /* netif_tx_stop_queue() must be done before checking
6098 * checking tx index in tg3_tx_avail() below, because in
6099 * tg3_tx(), we update tx index before checking for
6100 * netif_tx_queue_stopped().
6103 if (tg3_tx_avail(tnapi
) > TG3_TX_WAKEUP_THRESH(tnapi
))
6104 netif_tx_wake_queue(txq
);
6110 return NETDEV_TX_OK
;
6113 tg3_skb_error_unmap(tnapi
, skb
, i
);
6115 tnapi
->tx_buffers
[tnapi
->tx_prod
].skb
= NULL
;
6116 return NETDEV_TX_OK
;
6119 static void tg3_set_loopback(struct net_device
*dev
, u32 features
)
6121 struct tg3
*tp
= netdev_priv(dev
);
6123 if (features
& NETIF_F_LOOPBACK
) {
6124 if (tp
->mac_mode
& MAC_MODE_PORT_INT_LPBACK
)
6128 * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6129 * loopback mode if Half-Duplex mode was negotiated earlier.
6131 tp
->mac_mode
&= ~MAC_MODE_HALF_DUPLEX
;
6133 /* Enable internal MAC loopback mode */
6134 tp
->mac_mode
|= MAC_MODE_PORT_INT_LPBACK
;
6135 spin_lock_bh(&tp
->lock
);
6136 tw32(MAC_MODE
, tp
->mac_mode
);
6137 netif_carrier_on(tp
->dev
);
6138 spin_unlock_bh(&tp
->lock
);
6139 netdev_info(dev
, "Internal MAC loopback mode enabled.\n");
6141 if (!(tp
->mac_mode
& MAC_MODE_PORT_INT_LPBACK
))
6144 /* Disable internal MAC loopback mode */
6145 tp
->mac_mode
&= ~MAC_MODE_PORT_INT_LPBACK
;
6146 spin_lock_bh(&tp
->lock
);
6147 tw32(MAC_MODE
, tp
->mac_mode
);
6148 /* Force link status check */
6149 tg3_setup_phy(tp
, 1);
6150 spin_unlock_bh(&tp
->lock
);
6151 netdev_info(dev
, "Internal MAC loopback mode disabled.\n");
6155 static u32
tg3_fix_features(struct net_device
*dev
, u32 features
)
6157 struct tg3
*tp
= netdev_priv(dev
);
6159 if (dev
->mtu
> ETH_DATA_LEN
&& tg3_flag(tp
, 5780_CLASS
))
6160 features
&= ~NETIF_F_ALL_TSO
;
6165 static int tg3_set_features(struct net_device
*dev
, u32 features
)
6167 u32 changed
= dev
->features
^ features
;
6169 if ((changed
& NETIF_F_LOOPBACK
) && netif_running(dev
))
6170 tg3_set_loopback(dev
, features
);
6175 static inline void tg3_set_mtu(struct net_device
*dev
, struct tg3
*tp
,
6180 if (new_mtu
> ETH_DATA_LEN
) {
6181 if (tg3_flag(tp
, 5780_CLASS
)) {
6182 netdev_update_features(dev
);
6183 tg3_flag_clear(tp
, TSO_CAPABLE
);
6185 tg3_flag_set(tp
, JUMBO_RING_ENABLE
);
6188 if (tg3_flag(tp
, 5780_CLASS
)) {
6189 tg3_flag_set(tp
, TSO_CAPABLE
);
6190 netdev_update_features(dev
);
6192 tg3_flag_clear(tp
, JUMBO_RING_ENABLE
);
6196 static int tg3_change_mtu(struct net_device
*dev
, int new_mtu
)
6198 struct tg3
*tp
= netdev_priv(dev
);
6201 if (new_mtu
< TG3_MIN_MTU
|| new_mtu
> TG3_MAX_MTU(tp
))
6204 if (!netif_running(dev
)) {
6205 /* We'll just catch it later when the
6208 tg3_set_mtu(dev
, tp
, new_mtu
);
6216 tg3_full_lock(tp
, 1);
6218 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
6220 tg3_set_mtu(dev
, tp
, new_mtu
);
6222 err
= tg3_restart_hw(tp
, 0);
6225 tg3_netif_start(tp
);
6227 tg3_full_unlock(tp
);
6235 static void tg3_rx_prodring_free(struct tg3
*tp
,
6236 struct tg3_rx_prodring_set
*tpr
)
6240 if (tpr
!= &tp
->napi
[0].prodring
) {
6241 for (i
= tpr
->rx_std_cons_idx
; i
!= tpr
->rx_std_prod_idx
;
6242 i
= (i
+ 1) & tp
->rx_std_ring_mask
)
6243 tg3_rx_skb_free(tp
, &tpr
->rx_std_buffers
[i
],
6246 if (tg3_flag(tp
, JUMBO_CAPABLE
)) {
6247 for (i
= tpr
->rx_jmb_cons_idx
;
6248 i
!= tpr
->rx_jmb_prod_idx
;
6249 i
= (i
+ 1) & tp
->rx_jmb_ring_mask
) {
6250 tg3_rx_skb_free(tp
, &tpr
->rx_jmb_buffers
[i
],
6258 for (i
= 0; i
<= tp
->rx_std_ring_mask
; i
++)
6259 tg3_rx_skb_free(tp
, &tpr
->rx_std_buffers
[i
],
6262 if (tg3_flag(tp
, JUMBO_CAPABLE
) && !tg3_flag(tp
, 5780_CLASS
)) {
6263 for (i
= 0; i
<= tp
->rx_jmb_ring_mask
; i
++)
6264 tg3_rx_skb_free(tp
, &tpr
->rx_jmb_buffers
[i
],
6269 /* Initialize rx rings for packet processing.
6271 * The chip has been shut down and the driver detached from
6272 * the networking, so no interrupts or new tx packets will
6273 * end up in the driver. tp->{tx,}lock are held and thus
6276 static int tg3_rx_prodring_alloc(struct tg3
*tp
,
6277 struct tg3_rx_prodring_set
*tpr
)
6279 u32 i
, rx_pkt_dma_sz
;
6281 tpr
->rx_std_cons_idx
= 0;
6282 tpr
->rx_std_prod_idx
= 0;
6283 tpr
->rx_jmb_cons_idx
= 0;
6284 tpr
->rx_jmb_prod_idx
= 0;
6286 if (tpr
!= &tp
->napi
[0].prodring
) {
6287 memset(&tpr
->rx_std_buffers
[0], 0,
6288 TG3_RX_STD_BUFF_RING_SIZE(tp
));
6289 if (tpr
->rx_jmb_buffers
)
6290 memset(&tpr
->rx_jmb_buffers
[0], 0,
6291 TG3_RX_JMB_BUFF_RING_SIZE(tp
));
6295 /* Zero out all descriptors. */
6296 memset(tpr
->rx_std
, 0, TG3_RX_STD_RING_BYTES(tp
));
6298 rx_pkt_dma_sz
= TG3_RX_STD_DMA_SZ
;
6299 if (tg3_flag(tp
, 5780_CLASS
) &&
6300 tp
->dev
->mtu
> ETH_DATA_LEN
)
6301 rx_pkt_dma_sz
= TG3_RX_JMB_DMA_SZ
;
6302 tp
->rx_pkt_map_sz
= TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz
);
6304 /* Initialize invariants of the rings, we only set this
6305 * stuff once. This works because the card does not
6306 * write into the rx buffer posting rings.
6308 for (i
= 0; i
<= tp
->rx_std_ring_mask
; i
++) {
6309 struct tg3_rx_buffer_desc
*rxd
;
6311 rxd
= &tpr
->rx_std
[i
];
6312 rxd
->idx_len
= rx_pkt_dma_sz
<< RXD_LEN_SHIFT
;
6313 rxd
->type_flags
= (RXD_FLAG_END
<< RXD_FLAGS_SHIFT
);
6314 rxd
->opaque
= (RXD_OPAQUE_RING_STD
|
6315 (i
<< RXD_OPAQUE_INDEX_SHIFT
));
6318 /* Now allocate fresh SKBs for each rx ring. */
6319 for (i
= 0; i
< tp
->rx_pending
; i
++) {
6320 if (tg3_alloc_rx_skb(tp
, tpr
, RXD_OPAQUE_RING_STD
, i
) < 0) {
6321 netdev_warn(tp
->dev
,
6322 "Using a smaller RX standard ring. Only "
6323 "%d out of %d buffers were allocated "
6324 "successfully\n", i
, tp
->rx_pending
);
6332 if (!tg3_flag(tp
, JUMBO_CAPABLE
) || tg3_flag(tp
, 5780_CLASS
))
6335 memset(tpr
->rx_jmb
, 0, TG3_RX_JMB_RING_BYTES(tp
));
6337 if (!tg3_flag(tp
, JUMBO_RING_ENABLE
))
6340 for (i
= 0; i
<= tp
->rx_jmb_ring_mask
; i
++) {
6341 struct tg3_rx_buffer_desc
*rxd
;
6343 rxd
= &tpr
->rx_jmb
[i
].std
;
6344 rxd
->idx_len
= TG3_RX_JMB_DMA_SZ
<< RXD_LEN_SHIFT
;
6345 rxd
->type_flags
= (RXD_FLAG_END
<< RXD_FLAGS_SHIFT
) |
6347 rxd
->opaque
= (RXD_OPAQUE_RING_JUMBO
|
6348 (i
<< RXD_OPAQUE_INDEX_SHIFT
));
6351 for (i
= 0; i
< tp
->rx_jumbo_pending
; i
++) {
6352 if (tg3_alloc_rx_skb(tp
, tpr
, RXD_OPAQUE_RING_JUMBO
, i
) < 0) {
6353 netdev_warn(tp
->dev
,
6354 "Using a smaller RX jumbo ring. Only %d "
6355 "out of %d buffers were allocated "
6356 "successfully\n", i
, tp
->rx_jumbo_pending
);
6359 tp
->rx_jumbo_pending
= i
;
6368 tg3_rx_prodring_free(tp
, tpr
);
6372 static void tg3_rx_prodring_fini(struct tg3
*tp
,
6373 struct tg3_rx_prodring_set
*tpr
)
6375 kfree(tpr
->rx_std_buffers
);
6376 tpr
->rx_std_buffers
= NULL
;
6377 kfree(tpr
->rx_jmb_buffers
);
6378 tpr
->rx_jmb_buffers
= NULL
;
6380 dma_free_coherent(&tp
->pdev
->dev
, TG3_RX_STD_RING_BYTES(tp
),
6381 tpr
->rx_std
, tpr
->rx_std_mapping
);
6385 dma_free_coherent(&tp
->pdev
->dev
, TG3_RX_JMB_RING_BYTES(tp
),
6386 tpr
->rx_jmb
, tpr
->rx_jmb_mapping
);
6391 static int tg3_rx_prodring_init(struct tg3
*tp
,
6392 struct tg3_rx_prodring_set
*tpr
)
6394 tpr
->rx_std_buffers
= kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp
),
6396 if (!tpr
->rx_std_buffers
)
6399 tpr
->rx_std
= dma_alloc_coherent(&tp
->pdev
->dev
,
6400 TG3_RX_STD_RING_BYTES(tp
),
6401 &tpr
->rx_std_mapping
,
6406 if (tg3_flag(tp
, JUMBO_CAPABLE
) && !tg3_flag(tp
, 5780_CLASS
)) {
6407 tpr
->rx_jmb_buffers
= kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp
),
6409 if (!tpr
->rx_jmb_buffers
)
6412 tpr
->rx_jmb
= dma_alloc_coherent(&tp
->pdev
->dev
,
6413 TG3_RX_JMB_RING_BYTES(tp
),
6414 &tpr
->rx_jmb_mapping
,
6423 tg3_rx_prodring_fini(tp
, tpr
);
6427 /* Free up pending packets in all rx/tx rings.
6429 * The chip has been shut down and the driver detached from
6430 * the networking, so no interrupts or new tx packets will
6431 * end up in the driver. tp->{tx,}lock is not held and we are not
6432 * in an interrupt context and thus may sleep.
6434 static void tg3_free_rings(struct tg3
*tp
)
6438 for (j
= 0; j
< tp
->irq_cnt
; j
++) {
6439 struct tg3_napi
*tnapi
= &tp
->napi
[j
];
6441 tg3_rx_prodring_free(tp
, &tnapi
->prodring
);
6443 if (!tnapi
->tx_buffers
)
6446 for (i
= 0; i
< TG3_TX_RING_SIZE
; ) {
6447 struct ring_info
*txp
;
6448 struct sk_buff
*skb
;
6451 txp
= &tnapi
->tx_buffers
[i
];
6459 pci_unmap_single(tp
->pdev
,
6460 dma_unmap_addr(txp
, mapping
),
6467 for (k
= 0; k
< skb_shinfo(skb
)->nr_frags
; k
++) {
6468 txp
= &tnapi
->tx_buffers
[i
& (TG3_TX_RING_SIZE
- 1)];
6469 pci_unmap_page(tp
->pdev
,
6470 dma_unmap_addr(txp
, mapping
),
6471 skb_shinfo(skb
)->frags
[k
].size
,
6476 dev_kfree_skb_any(skb
);
6481 /* Initialize tx/rx rings for packet processing.
6483 * The chip has been shut down and the driver detached from
6484 * the networking, so no interrupts or new tx packets will
6485 * end up in the driver. tp->{tx,}lock are held and thus
6488 static int tg3_init_rings(struct tg3
*tp
)
6492 /* Free up all the SKBs. */
6495 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
6496 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
6498 tnapi
->last_tag
= 0;
6499 tnapi
->last_irq_tag
= 0;
6500 tnapi
->hw_status
->status
= 0;
6501 tnapi
->hw_status
->status_tag
= 0;
6502 memset(tnapi
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
6507 memset(tnapi
->tx_ring
, 0, TG3_TX_RING_BYTES
);
6509 tnapi
->rx_rcb_ptr
= 0;
6511 memset(tnapi
->rx_rcb
, 0, TG3_RX_RCB_RING_BYTES(tp
));
6513 if (tg3_rx_prodring_alloc(tp
, &tnapi
->prodring
)) {
6523 * Must not be invoked with interrupt sources disabled and
6524 * the hardware shutdown down.
6526 static void tg3_free_consistent(struct tg3
*tp
)
6530 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
6531 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
6533 if (tnapi
->tx_ring
) {
6534 dma_free_coherent(&tp
->pdev
->dev
, TG3_TX_RING_BYTES
,
6535 tnapi
->tx_ring
, tnapi
->tx_desc_mapping
);
6536 tnapi
->tx_ring
= NULL
;
6539 kfree(tnapi
->tx_buffers
);
6540 tnapi
->tx_buffers
= NULL
;
6542 if (tnapi
->rx_rcb
) {
6543 dma_free_coherent(&tp
->pdev
->dev
,
6544 TG3_RX_RCB_RING_BYTES(tp
),
6546 tnapi
->rx_rcb_mapping
);
6547 tnapi
->rx_rcb
= NULL
;
6550 tg3_rx_prodring_fini(tp
, &tnapi
->prodring
);
6552 if (tnapi
->hw_status
) {
6553 dma_free_coherent(&tp
->pdev
->dev
, TG3_HW_STATUS_SIZE
,
6555 tnapi
->status_mapping
);
6556 tnapi
->hw_status
= NULL
;
6561 dma_free_coherent(&tp
->pdev
->dev
, sizeof(struct tg3_hw_stats
),
6562 tp
->hw_stats
, tp
->stats_mapping
);
6563 tp
->hw_stats
= NULL
;
6568 * Must not be invoked with interrupt sources disabled and
6569 * the hardware shutdown down. Can sleep.
6571 static int tg3_alloc_consistent(struct tg3
*tp
)
6575 tp
->hw_stats
= dma_alloc_coherent(&tp
->pdev
->dev
,
6576 sizeof(struct tg3_hw_stats
),
6582 memset(tp
->hw_stats
, 0, sizeof(struct tg3_hw_stats
));
6584 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
6585 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
6586 struct tg3_hw_status
*sblk
;
6588 tnapi
->hw_status
= dma_alloc_coherent(&tp
->pdev
->dev
,
6590 &tnapi
->status_mapping
,
6592 if (!tnapi
->hw_status
)
6595 memset(tnapi
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
6596 sblk
= tnapi
->hw_status
;
6598 if (tg3_rx_prodring_init(tp
, &tnapi
->prodring
))
6601 /* If multivector TSS is enabled, vector 0 does not handle
6602 * tx interrupts. Don't allocate any resources for it.
6604 if ((!i
&& !tg3_flag(tp
, ENABLE_TSS
)) ||
6605 (i
&& tg3_flag(tp
, ENABLE_TSS
))) {
6606 tnapi
->tx_buffers
= kzalloc(sizeof(struct ring_info
) *
6609 if (!tnapi
->tx_buffers
)
6612 tnapi
->tx_ring
= dma_alloc_coherent(&tp
->pdev
->dev
,
6614 &tnapi
->tx_desc_mapping
,
6616 if (!tnapi
->tx_ring
)
6621 * When RSS is enabled, the status block format changes
6622 * slightly. The "rx_jumbo_consumer", "reserved",
6623 * and "rx_mini_consumer" members get mapped to the
6624 * other three rx return ring producer indexes.
6628 tnapi
->rx_rcb_prod_idx
= &sblk
->idx
[0].rx_producer
;
6631 tnapi
->rx_rcb_prod_idx
= &sblk
->rx_jumbo_consumer
;
6634 tnapi
->rx_rcb_prod_idx
= &sblk
->reserved
;
6637 tnapi
->rx_rcb_prod_idx
= &sblk
->rx_mini_consumer
;
6642 * If multivector RSS is enabled, vector 0 does not handle
6643 * rx or tx interrupts. Don't allocate any resources for it.
6645 if (!i
&& tg3_flag(tp
, ENABLE_RSS
))
6648 tnapi
->rx_rcb
= dma_alloc_coherent(&tp
->pdev
->dev
,
6649 TG3_RX_RCB_RING_BYTES(tp
),
6650 &tnapi
->rx_rcb_mapping
,
6655 memset(tnapi
->rx_rcb
, 0, TG3_RX_RCB_RING_BYTES(tp
));
6661 tg3_free_consistent(tp
);
6665 #define MAX_WAIT_CNT 1000
6667 /* To stop a block, clear the enable bit and poll till it
6668 * clears. tp->lock is held.
6670 static int tg3_stop_block(struct tg3
*tp
, unsigned long ofs
, u32 enable_bit
, int silent
)
6675 if (tg3_flag(tp
, 5705_PLUS
)) {
6682 /* We can't enable/disable these bits of the
6683 * 5705/5750, just say success.
6696 for (i
= 0; i
< MAX_WAIT_CNT
; i
++) {
6699 if ((val
& enable_bit
) == 0)
6703 if (i
== MAX_WAIT_CNT
&& !silent
) {
6704 dev_err(&tp
->pdev
->dev
,
6705 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6713 /* tp->lock is held. */
6714 static int tg3_abort_hw(struct tg3
*tp
, int silent
)
6718 tg3_disable_ints(tp
);
6720 tp
->rx_mode
&= ~RX_MODE_ENABLE
;
6721 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
6724 err
= tg3_stop_block(tp
, RCVBDI_MODE
, RCVBDI_MODE_ENABLE
, silent
);
6725 err
|= tg3_stop_block(tp
, RCVLPC_MODE
, RCVLPC_MODE_ENABLE
, silent
);
6726 err
|= tg3_stop_block(tp
, RCVLSC_MODE
, RCVLSC_MODE_ENABLE
, silent
);
6727 err
|= tg3_stop_block(tp
, RCVDBDI_MODE
, RCVDBDI_MODE_ENABLE
, silent
);
6728 err
|= tg3_stop_block(tp
, RCVDCC_MODE
, RCVDCC_MODE_ENABLE
, silent
);
6729 err
|= tg3_stop_block(tp
, RCVCC_MODE
, RCVCC_MODE_ENABLE
, silent
);
6731 err
|= tg3_stop_block(tp
, SNDBDS_MODE
, SNDBDS_MODE_ENABLE
, silent
);
6732 err
|= tg3_stop_block(tp
, SNDBDI_MODE
, SNDBDI_MODE_ENABLE
, silent
);
6733 err
|= tg3_stop_block(tp
, SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
, silent
);
6734 err
|= tg3_stop_block(tp
, RDMAC_MODE
, RDMAC_MODE_ENABLE
, silent
);
6735 err
|= tg3_stop_block(tp
, SNDDATAC_MODE
, SNDDATAC_MODE_ENABLE
, silent
);
6736 err
|= tg3_stop_block(tp
, DMAC_MODE
, DMAC_MODE_ENABLE
, silent
);
6737 err
|= tg3_stop_block(tp
, SNDBDC_MODE
, SNDBDC_MODE_ENABLE
, silent
);
6739 tp
->mac_mode
&= ~MAC_MODE_TDE_ENABLE
;
6740 tw32_f(MAC_MODE
, tp
->mac_mode
);
6743 tp
->tx_mode
&= ~TX_MODE_ENABLE
;
6744 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
6746 for (i
= 0; i
< MAX_WAIT_CNT
; i
++) {
6748 if (!(tr32(MAC_TX_MODE
) & TX_MODE_ENABLE
))
6751 if (i
>= MAX_WAIT_CNT
) {
6752 dev_err(&tp
->pdev
->dev
,
6753 "%s timed out, TX_MODE_ENABLE will not clear "
6754 "MAC_TX_MODE=%08x\n", __func__
, tr32(MAC_TX_MODE
));
6758 err
|= tg3_stop_block(tp
, HOSTCC_MODE
, HOSTCC_MODE_ENABLE
, silent
);
6759 err
|= tg3_stop_block(tp
, WDMAC_MODE
, WDMAC_MODE_ENABLE
, silent
);
6760 err
|= tg3_stop_block(tp
, MBFREE_MODE
, MBFREE_MODE_ENABLE
, silent
);
6762 tw32(FTQ_RESET
, 0xffffffff);
6763 tw32(FTQ_RESET
, 0x00000000);
6765 err
|= tg3_stop_block(tp
, BUFMGR_MODE
, BUFMGR_MODE_ENABLE
, silent
);
6766 err
|= tg3_stop_block(tp
, MEMARB_MODE
, MEMARB_MODE_ENABLE
, silent
);
6768 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
6769 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
6770 if (tnapi
->hw_status
)
6771 memset(tnapi
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
6774 memset(tp
->hw_stats
, 0, sizeof(struct tg3_hw_stats
));
6779 static void tg3_ape_send_event(struct tg3
*tp
, u32 event
)
6784 /* NCSI does not support APE events */
6785 if (tg3_flag(tp
, APE_HAS_NCSI
))
6788 apedata
= tg3_ape_read32(tp
, TG3_APE_SEG_SIG
);
6789 if (apedata
!= APE_SEG_SIG_MAGIC
)
6792 apedata
= tg3_ape_read32(tp
, TG3_APE_FW_STATUS
);
6793 if (!(apedata
& APE_FW_STATUS_READY
))
6796 /* Wait for up to 1 millisecond for APE to service previous event. */
6797 for (i
= 0; i
< 10; i
++) {
6798 if (tg3_ape_lock(tp
, TG3_APE_LOCK_MEM
))
6801 apedata
= tg3_ape_read32(tp
, TG3_APE_EVENT_STATUS
);
6803 if (!(apedata
& APE_EVENT_STATUS_EVENT_PENDING
))
6804 tg3_ape_write32(tp
, TG3_APE_EVENT_STATUS
,
6805 event
| APE_EVENT_STATUS_EVENT_PENDING
);
6807 tg3_ape_unlock(tp
, TG3_APE_LOCK_MEM
);
6809 if (!(apedata
& APE_EVENT_STATUS_EVENT_PENDING
))
6815 if (!(apedata
& APE_EVENT_STATUS_EVENT_PENDING
))
6816 tg3_ape_write32(tp
, TG3_APE_EVENT
, APE_EVENT_1
);
6819 static void tg3_ape_driver_state_change(struct tg3
*tp
, int kind
)
6824 if (!tg3_flag(tp
, ENABLE_APE
))
6828 case RESET_KIND_INIT
:
6829 tg3_ape_write32(tp
, TG3_APE_HOST_SEG_SIG
,
6830 APE_HOST_SEG_SIG_MAGIC
);
6831 tg3_ape_write32(tp
, TG3_APE_HOST_SEG_LEN
,
6832 APE_HOST_SEG_LEN_MAGIC
);
6833 apedata
= tg3_ape_read32(tp
, TG3_APE_HOST_INIT_COUNT
);
6834 tg3_ape_write32(tp
, TG3_APE_HOST_INIT_COUNT
, ++apedata
);
6835 tg3_ape_write32(tp
, TG3_APE_HOST_DRIVER_ID
,
6836 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM
, TG3_MIN_NUM
));
6837 tg3_ape_write32(tp
, TG3_APE_HOST_BEHAVIOR
,
6838 APE_HOST_BEHAV_NO_PHYLOCK
);
6839 tg3_ape_write32(tp
, TG3_APE_HOST_DRVR_STATE
,
6840 TG3_APE_HOST_DRVR_STATE_START
);
6842 event
= APE_EVENT_STATUS_STATE_START
;
6844 case RESET_KIND_SHUTDOWN
:
6845 /* With the interface we are currently using,
6846 * APE does not track driver state. Wiping
6847 * out the HOST SEGMENT SIGNATURE forces
6848 * the APE to assume OS absent status.
6850 tg3_ape_write32(tp
, TG3_APE_HOST_SEG_SIG
, 0x0);
6852 if (device_may_wakeup(&tp
->pdev
->dev
) &&
6853 tg3_flag(tp
, WOL_ENABLE
)) {
6854 tg3_ape_write32(tp
, TG3_APE_HOST_WOL_SPEED
,
6855 TG3_APE_HOST_WOL_SPEED_AUTO
);
6856 apedata
= TG3_APE_HOST_DRVR_STATE_WOL
;
6858 apedata
= TG3_APE_HOST_DRVR_STATE_UNLOAD
;
6860 tg3_ape_write32(tp
, TG3_APE_HOST_DRVR_STATE
, apedata
);
6862 event
= APE_EVENT_STATUS_STATE_UNLOAD
;
6864 case RESET_KIND_SUSPEND
:
6865 event
= APE_EVENT_STATUS_STATE_SUSPEND
;
6871 event
|= APE_EVENT_STATUS_DRIVER_EVNT
| APE_EVENT_STATUS_STATE_CHNGE
;
6873 tg3_ape_send_event(tp
, event
);
6876 /* tp->lock is held. */
6877 static void tg3_write_sig_pre_reset(struct tg3
*tp
, int kind
)
6879 tg3_write_mem(tp
, NIC_SRAM_FIRMWARE_MBOX
,
6880 NIC_SRAM_FIRMWARE_MBOX_MAGIC1
);
6882 if (tg3_flag(tp
, ASF_NEW_HANDSHAKE
)) {
6884 case RESET_KIND_INIT
:
6885 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
6889 case RESET_KIND_SHUTDOWN
:
6890 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
6894 case RESET_KIND_SUSPEND
:
6895 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
6904 if (kind
== RESET_KIND_INIT
||
6905 kind
== RESET_KIND_SUSPEND
)
6906 tg3_ape_driver_state_change(tp
, kind
);
6909 /* tp->lock is held. */
6910 static void tg3_write_sig_post_reset(struct tg3
*tp
, int kind
)
6912 if (tg3_flag(tp
, ASF_NEW_HANDSHAKE
)) {
6914 case RESET_KIND_INIT
:
6915 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
6916 DRV_STATE_START_DONE
);
6919 case RESET_KIND_SHUTDOWN
:
6920 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
6921 DRV_STATE_UNLOAD_DONE
);
6929 if (kind
== RESET_KIND_SHUTDOWN
)
6930 tg3_ape_driver_state_change(tp
, kind
);
6933 /* tp->lock is held. */
6934 static void tg3_write_sig_legacy(struct tg3
*tp
, int kind
)
6936 if (tg3_flag(tp
, ENABLE_ASF
)) {
6938 case RESET_KIND_INIT
:
6939 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
6943 case RESET_KIND_SHUTDOWN
:
6944 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
6948 case RESET_KIND_SUSPEND
:
6949 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
6959 static int tg3_poll_fw(struct tg3
*tp
)
6964 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
6965 /* Wait up to 20ms for init done. */
6966 for (i
= 0; i
< 200; i
++) {
6967 if (tr32(VCPU_STATUS
) & VCPU_STATUS_INIT_DONE
)
6974 /* Wait for firmware initialization to complete. */
6975 for (i
= 0; i
< 100000; i
++) {
6976 tg3_read_mem(tp
, NIC_SRAM_FIRMWARE_MBOX
, &val
);
6977 if (val
== ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1
)
6982 /* Chip might not be fitted with firmware. Some Sun onboard
6983 * parts are configured like that. So don't signal the timeout
6984 * of the above loop as an error, but do report the lack of
6985 * running firmware once.
6987 if (i
>= 100000 && !tg3_flag(tp
, NO_FWARE_REPORTED
)) {
6988 tg3_flag_set(tp
, NO_FWARE_REPORTED
);
6990 netdev_info(tp
->dev
, "No firmware running\n");
6993 if (tp
->pci_chip_rev_id
== CHIPREV_ID_57765_A0
) {
6994 /* The 57765 A0 needs a little more
6995 * time to do some important work.
7003 /* Save PCI command register before chip reset */
7004 static void tg3_save_pci_state(struct tg3
*tp
)
7006 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &tp
->pci_cmd
);
7009 /* Restore PCI state after chip reset */
7010 static void tg3_restore_pci_state(struct tg3
*tp
)
7014 /* Re-enable indirect register accesses. */
7015 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
7016 tp
->misc_host_ctrl
);
7018 /* Set MAX PCI retry to zero. */
7019 val
= (PCISTATE_ROM_ENABLE
| PCISTATE_ROM_RETRY_ENABLE
);
7020 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
&&
7021 tg3_flag(tp
, PCIX_MODE
))
7022 val
|= PCISTATE_RETRY_SAME_DMA
;
7023 /* Allow reads and writes to the APE register and memory space. */
7024 if (tg3_flag(tp
, ENABLE_APE
))
7025 val
|= PCISTATE_ALLOW_APE_CTLSPC_WR
|
7026 PCISTATE_ALLOW_APE_SHMEM_WR
|
7027 PCISTATE_ALLOW_APE_PSPACE_WR
;
7028 pci_write_config_dword(tp
->pdev
, TG3PCI_PCISTATE
, val
);
7030 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, tp
->pci_cmd
);
7032 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5785
) {
7033 if (tg3_flag(tp
, PCI_EXPRESS
))
7034 pcie_set_readrq(tp
->pdev
, tp
->pcie_readrq
);
7036 pci_write_config_byte(tp
->pdev
, PCI_CACHE_LINE_SIZE
,
7037 tp
->pci_cacheline_sz
);
7038 pci_write_config_byte(tp
->pdev
, PCI_LATENCY_TIMER
,
7043 /* Make sure PCI-X relaxed ordering bit is clear. */
7044 if (tg3_flag(tp
, PCIX_MODE
)) {
7047 pci_read_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
7049 pcix_cmd
&= ~PCI_X_CMD_ERO
;
7050 pci_write_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
7054 if (tg3_flag(tp
, 5780_CLASS
)) {
7056 /* Chip reset on 5780 will reset MSI enable bit,
7057 * so need to restore it.
7059 if (tg3_flag(tp
, USING_MSI
)) {
7062 pci_read_config_word(tp
->pdev
,
7063 tp
->msi_cap
+ PCI_MSI_FLAGS
,
7065 pci_write_config_word(tp
->pdev
,
7066 tp
->msi_cap
+ PCI_MSI_FLAGS
,
7067 ctrl
| PCI_MSI_FLAGS_ENABLE
);
7068 val
= tr32(MSGINT_MODE
);
7069 tw32(MSGINT_MODE
, val
| MSGINT_MODE_ENABLE
);
7074 static void tg3_stop_fw(struct tg3
*);
7076 /* tp->lock is held. */
7077 static int tg3_chip_reset(struct tg3
*tp
)
7080 void (*write_op
)(struct tg3
*, u32
, u32
);
7085 tg3_ape_lock(tp
, TG3_APE_LOCK_GRC
);
7087 /* No matching tg3_nvram_unlock() after this because
7088 * chip reset below will undo the nvram lock.
7090 tp
->nvram_lock_cnt
= 0;
7092 /* GRC_MISC_CFG core clock reset will clear the memory
7093 * enable bit in PCI register 4 and the MSI enable bit
7094 * on some chips, so we save relevant registers here.
7096 tg3_save_pci_state(tp
);
7098 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
||
7099 tg3_flag(tp
, 5755_PLUS
))
7100 tw32(GRC_FASTBOOT_PC
, 0);
7103 * We must avoid the readl() that normally takes place.
7104 * It locks machines, causes machine checks, and other
7105 * fun things. So, temporarily disable the 5701
7106 * hardware workaround, while we do the reset.
7108 write_op
= tp
->write32
;
7109 if (write_op
== tg3_write_flush_reg32
)
7110 tp
->write32
= tg3_write32
;
7112 /* Prevent the irq handler from reading or writing PCI registers
7113 * during chip reset when the memory enable bit in the PCI command
7114 * register may be cleared. The chip does not generate interrupt
7115 * at this time, but the irq handler may still be called due to irq
7116 * sharing or irqpoll.
7118 tg3_flag_set(tp
, CHIP_RESETTING
);
7119 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
7120 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
7121 if (tnapi
->hw_status
) {
7122 tnapi
->hw_status
->status
= 0;
7123 tnapi
->hw_status
->status_tag
= 0;
7125 tnapi
->last_tag
= 0;
7126 tnapi
->last_irq_tag
= 0;
7130 for (i
= 0; i
< tp
->irq_cnt
; i
++)
7131 synchronize_irq(tp
->napi
[i
].irq_vec
);
7133 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
) {
7134 val
= tr32(TG3_PCIE_LNKCTL
) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN
;
7135 tw32(TG3_PCIE_LNKCTL
, val
| TG3_PCIE_LNKCTL_L1_PLL_PD_DIS
);
7139 val
= GRC_MISC_CFG_CORECLK_RESET
;
7141 if (tg3_flag(tp
, PCI_EXPRESS
)) {
7142 /* Force PCIe 1.0a mode */
7143 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5785
&&
7144 !tg3_flag(tp
, 57765_PLUS
) &&
7145 tr32(TG3_PCIE_PHY_TSTCTL
) ==
7146 (TG3_PCIE_PHY_TSTCTL_PCIE10
| TG3_PCIE_PHY_TSTCTL_PSCRAM
))
7147 tw32(TG3_PCIE_PHY_TSTCTL
, TG3_PCIE_PHY_TSTCTL_PSCRAM
);
7149 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
) {
7150 tw32(GRC_MISC_CFG
, (1 << 29));
7155 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
7156 tw32(VCPU_STATUS
, tr32(VCPU_STATUS
) | VCPU_STATUS_DRV_RESET
);
7157 tw32(GRC_VCPU_EXT_CTRL
,
7158 tr32(GRC_VCPU_EXT_CTRL
) & ~GRC_VCPU_EXT_CTRL_HALT_CPU
);
7161 /* Manage gphy power for all CPMU absent PCIe devices. */
7162 if (tg3_flag(tp
, 5705_PLUS
) && !tg3_flag(tp
, CPMU_PRESENT
))
7163 val
|= GRC_MISC_CFG_KEEP_GPHY_POWER
;
7165 tw32(GRC_MISC_CFG
, val
);
7167 /* restore 5701 hardware bug workaround write method */
7168 tp
->write32
= write_op
;
7170 /* Unfortunately, we have to delay before the PCI read back.
7171 * Some 575X chips even will not respond to a PCI cfg access
7172 * when the reset command is given to the chip.
7174 * How do these hardware designers expect things to work
7175 * properly if the PCI write is posted for a long period
7176 * of time? It is always necessary to have some method by
7177 * which a register read back can occur to push the write
7178 * out which does the reset.
7180 * For most tg3 variants the trick below was working.
7185 /* Flush PCI posted writes. The normal MMIO registers
7186 * are inaccessible at this time so this is the only
7187 * way to make this reliably (actually, this is no longer
7188 * the case, see above). I tried to use indirect
7189 * register read/write but this upset some 5701 variants.
7191 pci_read_config_dword(tp
->pdev
, PCI_COMMAND
, &val
);
7195 if (tg3_flag(tp
, PCI_EXPRESS
) && tp
->pcie_cap
) {
7198 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5750_A0
) {
7202 /* Wait for link training to complete. */
7203 for (i
= 0; i
< 5000; i
++)
7206 pci_read_config_dword(tp
->pdev
, 0xc4, &cfg_val
);
7207 pci_write_config_dword(tp
->pdev
, 0xc4,
7208 cfg_val
| (1 << 15));
7211 /* Clear the "no snoop" and "relaxed ordering" bits. */
7212 pci_read_config_word(tp
->pdev
,
7213 tp
->pcie_cap
+ PCI_EXP_DEVCTL
,
7215 val16
&= ~(PCI_EXP_DEVCTL_RELAX_EN
|
7216 PCI_EXP_DEVCTL_NOSNOOP_EN
);
7218 * Older PCIe devices only support the 128 byte
7219 * MPS setting. Enforce the restriction.
7221 if (!tg3_flag(tp
, CPMU_PRESENT
))
7222 val16
&= ~PCI_EXP_DEVCTL_PAYLOAD
;
7223 pci_write_config_word(tp
->pdev
,
7224 tp
->pcie_cap
+ PCI_EXP_DEVCTL
,
7227 pcie_set_readrq(tp
->pdev
, tp
->pcie_readrq
);
7229 /* Clear error status */
7230 pci_write_config_word(tp
->pdev
,
7231 tp
->pcie_cap
+ PCI_EXP_DEVSTA
,
7232 PCI_EXP_DEVSTA_CED
|
7233 PCI_EXP_DEVSTA_NFED
|
7234 PCI_EXP_DEVSTA_FED
|
7235 PCI_EXP_DEVSTA_URD
);
7238 tg3_restore_pci_state(tp
);
7240 tg3_flag_clear(tp
, CHIP_RESETTING
);
7241 tg3_flag_clear(tp
, ERROR_PROCESSED
);
7244 if (tg3_flag(tp
, 5780_CLASS
))
7245 val
= tr32(MEMARB_MODE
);
7246 tw32(MEMARB_MODE
, val
| MEMARB_MODE_ENABLE
);
7248 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5750_A3
) {
7250 tw32(0x5000, 0x400);
7253 tw32(GRC_MODE
, tp
->grc_mode
);
7255 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A0
) {
7258 tw32(0xc4, val
| (1 << 15));
7261 if ((tp
->nic_sram_data_cfg
& NIC_SRAM_DATA_CFG_MINI_PCI
) != 0 &&
7262 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
7263 tp
->pci_clock_ctrl
|= CLOCK_CTRL_CLKRUN_OENABLE
;
7264 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A0
)
7265 tp
->pci_clock_ctrl
|= CLOCK_CTRL_FORCE_CLKRUN
;
7266 tw32(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
);
7269 if (tg3_flag(tp
, ENABLE_APE
))
7270 tp
->mac_mode
= MAC_MODE_APE_TX_EN
|
7271 MAC_MODE_APE_RX_EN
|
7272 MAC_MODE_TDE_ENABLE
;
7274 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
7275 tp
->mac_mode
|= MAC_MODE_PORT_MODE_TBI
;
7277 } else if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) {
7278 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
7283 tw32_f(MAC_MODE
, val
);
7286 tg3_ape_unlock(tp
, TG3_APE_LOCK_GRC
);
7288 err
= tg3_poll_fw(tp
);
7294 if (tg3_flag(tp
, PCI_EXPRESS
) &&
7295 tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
&&
7296 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5785
&&
7297 !tg3_flag(tp
, 57765_PLUS
)) {
7300 tw32(0x7c00, val
| (1 << 25));
7303 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
) {
7304 val
= tr32(TG3_CPMU_CLCK_ORIDE
);
7305 tw32(TG3_CPMU_CLCK_ORIDE
, val
& ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN
);
7308 /* Reprobe ASF enable state. */
7309 tg3_flag_clear(tp
, ENABLE_ASF
);
7310 tg3_flag_clear(tp
, ASF_NEW_HANDSHAKE
);
7311 tg3_read_mem(tp
, NIC_SRAM_DATA_SIG
, &val
);
7312 if (val
== NIC_SRAM_DATA_SIG_MAGIC
) {
7315 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG
, &nic_cfg
);
7316 if (nic_cfg
& NIC_SRAM_DATA_CFG_ASF_ENABLE
) {
7317 tg3_flag_set(tp
, ENABLE_ASF
);
7318 tp
->last_event_jiffies
= jiffies
;
7319 if (tg3_flag(tp
, 5750_PLUS
))
7320 tg3_flag_set(tp
, ASF_NEW_HANDSHAKE
);
7327 /* tp->lock is held. */
7328 static void tg3_stop_fw(struct tg3
*tp
)
7330 if (tg3_flag(tp
, ENABLE_ASF
) && !tg3_flag(tp
, ENABLE_APE
)) {
7331 /* Wait for RX cpu to ACK the previous event. */
7332 tg3_wait_for_event_ack(tp
);
7334 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
, FWCMD_NICDRV_PAUSE_FW
);
7336 tg3_generate_fw_event(tp
);
7338 /* Wait for RX cpu to ACK this event. */
7339 tg3_wait_for_event_ack(tp
);
7343 /* tp->lock is held. */
7344 static int tg3_halt(struct tg3
*tp
, int kind
, int silent
)
7350 tg3_write_sig_pre_reset(tp
, kind
);
7352 tg3_abort_hw(tp
, silent
);
7353 err
= tg3_chip_reset(tp
);
7355 __tg3_set_mac_addr(tp
, 0);
7357 tg3_write_sig_legacy(tp
, kind
);
7358 tg3_write_sig_post_reset(tp
, kind
);
7366 #define RX_CPU_SCRATCH_BASE 0x30000
7367 #define RX_CPU_SCRATCH_SIZE 0x04000
7368 #define TX_CPU_SCRATCH_BASE 0x34000
7369 #define TX_CPU_SCRATCH_SIZE 0x04000
7371 /* tp->lock is held. */
7372 static int tg3_halt_cpu(struct tg3
*tp
, u32 offset
)
7376 BUG_ON(offset
== TX_CPU_BASE
&& tg3_flag(tp
, 5705_PLUS
));
7378 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
7379 u32 val
= tr32(GRC_VCPU_EXT_CTRL
);
7381 tw32(GRC_VCPU_EXT_CTRL
, val
| GRC_VCPU_EXT_CTRL_HALT_CPU
);
7384 if (offset
== RX_CPU_BASE
) {
7385 for (i
= 0; i
< 10000; i
++) {
7386 tw32(offset
+ CPU_STATE
, 0xffffffff);
7387 tw32(offset
+ CPU_MODE
, CPU_MODE_HALT
);
7388 if (tr32(offset
+ CPU_MODE
) & CPU_MODE_HALT
)
7392 tw32(offset
+ CPU_STATE
, 0xffffffff);
7393 tw32_f(offset
+ CPU_MODE
, CPU_MODE_HALT
);
7396 for (i
= 0; i
< 10000; i
++) {
7397 tw32(offset
+ CPU_STATE
, 0xffffffff);
7398 tw32(offset
+ CPU_MODE
, CPU_MODE_HALT
);
7399 if (tr32(offset
+ CPU_MODE
) & CPU_MODE_HALT
)
7405 netdev_err(tp
->dev
, "%s timed out, %s CPU\n",
7406 __func__
, offset
== RX_CPU_BASE
? "RX" : "TX");
7410 /* Clear firmware's nvram arbitration. */
7411 if (tg3_flag(tp
, NVRAM
))
7412 tw32(NVRAM_SWARB
, SWARB_REQ_CLR0
);
7417 unsigned int fw_base
;
7418 unsigned int fw_len
;
7419 const __be32
*fw_data
;
7422 /* tp->lock is held. */
7423 static int tg3_load_firmware_cpu(struct tg3
*tp
, u32 cpu_base
, u32 cpu_scratch_base
,
7424 int cpu_scratch_size
, struct fw_info
*info
)
7426 int err
, lock_err
, i
;
7427 void (*write_op
)(struct tg3
*, u32
, u32
);
7429 if (cpu_base
== TX_CPU_BASE
&& tg3_flag(tp
, 5705_PLUS
)) {
7431 "%s: Trying to load TX cpu firmware which is 5705\n",
7436 if (tg3_flag(tp
, 5705_PLUS
))
7437 write_op
= tg3_write_mem
;
7439 write_op
= tg3_write_indirect_reg32
;
7441 /* It is possible that bootcode is still loading at this point.
7442 * Get the nvram lock first before halting the cpu.
7444 lock_err
= tg3_nvram_lock(tp
);
7445 err
= tg3_halt_cpu(tp
, cpu_base
);
7447 tg3_nvram_unlock(tp
);
7451 for (i
= 0; i
< cpu_scratch_size
; i
+= sizeof(u32
))
7452 write_op(tp
, cpu_scratch_base
+ i
, 0);
7453 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
7454 tw32(cpu_base
+ CPU_MODE
, tr32(cpu_base
+CPU_MODE
)|CPU_MODE_HALT
);
7455 for (i
= 0; i
< (info
->fw_len
/ sizeof(u32
)); i
++)
7456 write_op(tp
, (cpu_scratch_base
+
7457 (info
->fw_base
& 0xffff) +
7459 be32_to_cpu(info
->fw_data
[i
]));
7467 /* tp->lock is held. */
7468 static int tg3_load_5701_a0_firmware_fix(struct tg3
*tp
)
7470 struct fw_info info
;
7471 const __be32
*fw_data
;
7474 fw_data
= (void *)tp
->fw
->data
;
7476 /* Firmware blob starts with version numbers, followed by
7477 start address and length. We are setting complete length.
7478 length = end_address_of_bss - start_address_of_text.
7479 Remainder is the blob to be loaded contiguously
7480 from start address. */
7482 info
.fw_base
= be32_to_cpu(fw_data
[1]);
7483 info
.fw_len
= tp
->fw
->size
- 12;
7484 info
.fw_data
= &fw_data
[3];
7486 err
= tg3_load_firmware_cpu(tp
, RX_CPU_BASE
,
7487 RX_CPU_SCRATCH_BASE
, RX_CPU_SCRATCH_SIZE
,
7492 err
= tg3_load_firmware_cpu(tp
, TX_CPU_BASE
,
7493 TX_CPU_SCRATCH_BASE
, TX_CPU_SCRATCH_SIZE
,
7498 /* Now startup only the RX cpu. */
7499 tw32(RX_CPU_BASE
+ CPU_STATE
, 0xffffffff);
7500 tw32_f(RX_CPU_BASE
+ CPU_PC
, info
.fw_base
);
7502 for (i
= 0; i
< 5; i
++) {
7503 if (tr32(RX_CPU_BASE
+ CPU_PC
) == info
.fw_base
)
7505 tw32(RX_CPU_BASE
+ CPU_STATE
, 0xffffffff);
7506 tw32(RX_CPU_BASE
+ CPU_MODE
, CPU_MODE_HALT
);
7507 tw32_f(RX_CPU_BASE
+ CPU_PC
, info
.fw_base
);
7511 netdev_err(tp
->dev
, "%s fails to set RX CPU PC, is %08x "
7512 "should be %08x\n", __func__
,
7513 tr32(RX_CPU_BASE
+ CPU_PC
), info
.fw_base
);
7516 tw32(RX_CPU_BASE
+ CPU_STATE
, 0xffffffff);
7517 tw32_f(RX_CPU_BASE
+ CPU_MODE
, 0x00000000);
7522 /* tp->lock is held. */
7523 static int tg3_load_tso_firmware(struct tg3
*tp
)
7525 struct fw_info info
;
7526 const __be32
*fw_data
;
7527 unsigned long cpu_base
, cpu_scratch_base
, cpu_scratch_size
;
7530 if (tg3_flag(tp
, HW_TSO_1
) ||
7531 tg3_flag(tp
, HW_TSO_2
) ||
7532 tg3_flag(tp
, HW_TSO_3
))
7535 fw_data
= (void *)tp
->fw
->data
;
7537 /* Firmware blob starts with version numbers, followed by
7538 start address and length. We are setting complete length.
7539 length = end_address_of_bss - start_address_of_text.
7540 Remainder is the blob to be loaded contiguously
7541 from start address. */
7543 info
.fw_base
= be32_to_cpu(fw_data
[1]);
7544 cpu_scratch_size
= tp
->fw_len
;
7545 info
.fw_len
= tp
->fw
->size
- 12;
7546 info
.fw_data
= &fw_data
[3];
7548 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
7549 cpu_base
= RX_CPU_BASE
;
7550 cpu_scratch_base
= NIC_SRAM_MBUF_POOL_BASE5705
;
7552 cpu_base
= TX_CPU_BASE
;
7553 cpu_scratch_base
= TX_CPU_SCRATCH_BASE
;
7554 cpu_scratch_size
= TX_CPU_SCRATCH_SIZE
;
7557 err
= tg3_load_firmware_cpu(tp
, cpu_base
,
7558 cpu_scratch_base
, cpu_scratch_size
,
7563 /* Now startup the cpu. */
7564 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
7565 tw32_f(cpu_base
+ CPU_PC
, info
.fw_base
);
7567 for (i
= 0; i
< 5; i
++) {
7568 if (tr32(cpu_base
+ CPU_PC
) == info
.fw_base
)
7570 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
7571 tw32(cpu_base
+ CPU_MODE
, CPU_MODE_HALT
);
7572 tw32_f(cpu_base
+ CPU_PC
, info
.fw_base
);
7577 "%s fails to set CPU PC, is %08x should be %08x\n",
7578 __func__
, tr32(cpu_base
+ CPU_PC
), info
.fw_base
);
7581 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
7582 tw32_f(cpu_base
+ CPU_MODE
, 0x00000000);
7587 static int tg3_set_mac_addr(struct net_device
*dev
, void *p
)
7589 struct tg3
*tp
= netdev_priv(dev
);
7590 struct sockaddr
*addr
= p
;
7591 int err
= 0, skip_mac_1
= 0;
7593 if (!is_valid_ether_addr(addr
->sa_data
))
7596 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
7598 if (!netif_running(dev
))
7601 if (tg3_flag(tp
, ENABLE_ASF
)) {
7602 u32 addr0_high
, addr0_low
, addr1_high
, addr1_low
;
7604 addr0_high
= tr32(MAC_ADDR_0_HIGH
);
7605 addr0_low
= tr32(MAC_ADDR_0_LOW
);
7606 addr1_high
= tr32(MAC_ADDR_1_HIGH
);
7607 addr1_low
= tr32(MAC_ADDR_1_LOW
);
7609 /* Skip MAC addr 1 if ASF is using it. */
7610 if ((addr0_high
!= addr1_high
|| addr0_low
!= addr1_low
) &&
7611 !(addr1_high
== 0 && addr1_low
== 0))
7614 spin_lock_bh(&tp
->lock
);
7615 __tg3_set_mac_addr(tp
, skip_mac_1
);
7616 spin_unlock_bh(&tp
->lock
);
7621 /* tp->lock is held. */
7622 static void tg3_set_bdinfo(struct tg3
*tp
, u32 bdinfo_addr
,
7623 dma_addr_t mapping
, u32 maxlen_flags
,
7627 (bdinfo_addr
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
),
7628 ((u64
) mapping
>> 32));
7630 (bdinfo_addr
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
),
7631 ((u64
) mapping
& 0xffffffff));
7633 (bdinfo_addr
+ TG3_BDINFO_MAXLEN_FLAGS
),
7636 if (!tg3_flag(tp
, 5705_PLUS
))
7638 (bdinfo_addr
+ TG3_BDINFO_NIC_ADDR
),
7642 static void __tg3_set_rx_mode(struct net_device
*);
7643 static void __tg3_set_coalesce(struct tg3
*tp
, struct ethtool_coalesce
*ec
)
7647 if (!tg3_flag(tp
, ENABLE_TSS
)) {
7648 tw32(HOSTCC_TXCOL_TICKS
, ec
->tx_coalesce_usecs
);
7649 tw32(HOSTCC_TXMAX_FRAMES
, ec
->tx_max_coalesced_frames
);
7650 tw32(HOSTCC_TXCOAL_MAXF_INT
, ec
->tx_max_coalesced_frames_irq
);
7652 tw32(HOSTCC_TXCOL_TICKS
, 0);
7653 tw32(HOSTCC_TXMAX_FRAMES
, 0);
7654 tw32(HOSTCC_TXCOAL_MAXF_INT
, 0);
7657 if (!tg3_flag(tp
, ENABLE_RSS
)) {
7658 tw32(HOSTCC_RXCOL_TICKS
, ec
->rx_coalesce_usecs
);
7659 tw32(HOSTCC_RXMAX_FRAMES
, ec
->rx_max_coalesced_frames
);
7660 tw32(HOSTCC_RXCOAL_MAXF_INT
, ec
->rx_max_coalesced_frames_irq
);
7662 tw32(HOSTCC_RXCOL_TICKS
, 0);
7663 tw32(HOSTCC_RXMAX_FRAMES
, 0);
7664 tw32(HOSTCC_RXCOAL_MAXF_INT
, 0);
7667 if (!tg3_flag(tp
, 5705_PLUS
)) {
7668 u32 val
= ec
->stats_block_coalesce_usecs
;
7670 tw32(HOSTCC_RXCOAL_TICK_INT
, ec
->rx_coalesce_usecs_irq
);
7671 tw32(HOSTCC_TXCOAL_TICK_INT
, ec
->tx_coalesce_usecs_irq
);
7673 if (!netif_carrier_ok(tp
->dev
))
7676 tw32(HOSTCC_STAT_COAL_TICKS
, val
);
7679 for (i
= 0; i
< tp
->irq_cnt
- 1; i
++) {
7682 reg
= HOSTCC_RXCOL_TICKS_VEC1
+ i
* 0x18;
7683 tw32(reg
, ec
->rx_coalesce_usecs
);
7684 reg
= HOSTCC_RXMAX_FRAMES_VEC1
+ i
* 0x18;
7685 tw32(reg
, ec
->rx_max_coalesced_frames
);
7686 reg
= HOSTCC_RXCOAL_MAXF_INT_VEC1
+ i
* 0x18;
7687 tw32(reg
, ec
->rx_max_coalesced_frames_irq
);
7689 if (tg3_flag(tp
, ENABLE_TSS
)) {
7690 reg
= HOSTCC_TXCOL_TICKS_VEC1
+ i
* 0x18;
7691 tw32(reg
, ec
->tx_coalesce_usecs
);
7692 reg
= HOSTCC_TXMAX_FRAMES_VEC1
+ i
* 0x18;
7693 tw32(reg
, ec
->tx_max_coalesced_frames
);
7694 reg
= HOSTCC_TXCOAL_MAXF_INT_VEC1
+ i
* 0x18;
7695 tw32(reg
, ec
->tx_max_coalesced_frames_irq
);
7699 for (; i
< tp
->irq_max
- 1; i
++) {
7700 tw32(HOSTCC_RXCOL_TICKS_VEC1
+ i
* 0x18, 0);
7701 tw32(HOSTCC_RXMAX_FRAMES_VEC1
+ i
* 0x18, 0);
7702 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1
+ i
* 0x18, 0);
7704 if (tg3_flag(tp
, ENABLE_TSS
)) {
7705 tw32(HOSTCC_TXCOL_TICKS_VEC1
+ i
* 0x18, 0);
7706 tw32(HOSTCC_TXMAX_FRAMES_VEC1
+ i
* 0x18, 0);
7707 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1
+ i
* 0x18, 0);
7712 /* tp->lock is held. */
7713 static void tg3_rings_reset(struct tg3
*tp
)
7716 u32 stblk
, txrcb
, rxrcb
, limit
;
7717 struct tg3_napi
*tnapi
= &tp
->napi
[0];
7719 /* Disable all transmit rings but the first. */
7720 if (!tg3_flag(tp
, 5705_PLUS
))
7721 limit
= NIC_SRAM_SEND_RCB
+ TG3_BDINFO_SIZE
* 16;
7722 else if (tg3_flag(tp
, 5717_PLUS
))
7723 limit
= NIC_SRAM_SEND_RCB
+ TG3_BDINFO_SIZE
* 4;
7724 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
)
7725 limit
= NIC_SRAM_SEND_RCB
+ TG3_BDINFO_SIZE
* 2;
7727 limit
= NIC_SRAM_SEND_RCB
+ TG3_BDINFO_SIZE
;
7729 for (txrcb
= NIC_SRAM_SEND_RCB
+ TG3_BDINFO_SIZE
;
7730 txrcb
< limit
; txrcb
+= TG3_BDINFO_SIZE
)
7731 tg3_write_mem(tp
, txrcb
+ TG3_BDINFO_MAXLEN_FLAGS
,
7732 BDINFO_FLAGS_DISABLED
);
7735 /* Disable all receive return rings but the first. */
7736 if (tg3_flag(tp
, 5717_PLUS
))
7737 limit
= NIC_SRAM_RCV_RET_RCB
+ TG3_BDINFO_SIZE
* 17;
7738 else if (!tg3_flag(tp
, 5705_PLUS
))
7739 limit
= NIC_SRAM_RCV_RET_RCB
+ TG3_BDINFO_SIZE
* 16;
7740 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
7741 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
)
7742 limit
= NIC_SRAM_RCV_RET_RCB
+ TG3_BDINFO_SIZE
* 4;
7744 limit
= NIC_SRAM_RCV_RET_RCB
+ TG3_BDINFO_SIZE
;
7746 for (rxrcb
= NIC_SRAM_RCV_RET_RCB
+ TG3_BDINFO_SIZE
;
7747 rxrcb
< limit
; rxrcb
+= TG3_BDINFO_SIZE
)
7748 tg3_write_mem(tp
, rxrcb
+ TG3_BDINFO_MAXLEN_FLAGS
,
7749 BDINFO_FLAGS_DISABLED
);
7751 /* Disable interrupts */
7752 tw32_mailbox_f(tp
->napi
[0].int_mbox
, 1);
7754 /* Zero mailbox registers. */
7755 if (tg3_flag(tp
, SUPPORT_MSIX
)) {
7756 for (i
= 1; i
< tp
->irq_max
; i
++) {
7757 tp
->napi
[i
].tx_prod
= 0;
7758 tp
->napi
[i
].tx_cons
= 0;
7759 if (tg3_flag(tp
, ENABLE_TSS
))
7760 tw32_mailbox(tp
->napi
[i
].prodmbox
, 0);
7761 tw32_rx_mbox(tp
->napi
[i
].consmbox
, 0);
7762 tw32_mailbox_f(tp
->napi
[i
].int_mbox
, 1);
7764 if (!tg3_flag(tp
, ENABLE_TSS
))
7765 tw32_mailbox(tp
->napi
[0].prodmbox
, 0);
7767 tp
->napi
[0].tx_prod
= 0;
7768 tp
->napi
[0].tx_cons
= 0;
7769 tw32_mailbox(tp
->napi
[0].prodmbox
, 0);
7770 tw32_rx_mbox(tp
->napi
[0].consmbox
, 0);
7773 /* Make sure the NIC-based send BD rings are disabled. */
7774 if (!tg3_flag(tp
, 5705_PLUS
)) {
7775 u32 mbox
= MAILBOX_SNDNIC_PROD_IDX_0
+ TG3_64BIT_REG_LOW
;
7776 for (i
= 0; i
< 16; i
++)
7777 tw32_tx_mbox(mbox
+ i
* 8, 0);
7780 txrcb
= NIC_SRAM_SEND_RCB
;
7781 rxrcb
= NIC_SRAM_RCV_RET_RCB
;
7783 /* Clear status block in ram. */
7784 memset(tnapi
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
7786 /* Set status block DMA address */
7787 tw32(HOSTCC_STATUS_BLK_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
7788 ((u64
) tnapi
->status_mapping
>> 32));
7789 tw32(HOSTCC_STATUS_BLK_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
7790 ((u64
) tnapi
->status_mapping
& 0xffffffff));
7792 if (tnapi
->tx_ring
) {
7793 tg3_set_bdinfo(tp
, txrcb
, tnapi
->tx_desc_mapping
,
7794 (TG3_TX_RING_SIZE
<<
7795 BDINFO_FLAGS_MAXLEN_SHIFT
),
7796 NIC_SRAM_TX_BUFFER_DESC
);
7797 txrcb
+= TG3_BDINFO_SIZE
;
7800 if (tnapi
->rx_rcb
) {
7801 tg3_set_bdinfo(tp
, rxrcb
, tnapi
->rx_rcb_mapping
,
7802 (tp
->rx_ret_ring_mask
+ 1) <<
7803 BDINFO_FLAGS_MAXLEN_SHIFT
, 0);
7804 rxrcb
+= TG3_BDINFO_SIZE
;
7807 stblk
= HOSTCC_STATBLCK_RING1
;
7809 for (i
= 1, tnapi
++; i
< tp
->irq_cnt
; i
++, tnapi
++) {
7810 u64 mapping
= (u64
)tnapi
->status_mapping
;
7811 tw32(stblk
+ TG3_64BIT_REG_HIGH
, mapping
>> 32);
7812 tw32(stblk
+ TG3_64BIT_REG_LOW
, mapping
& 0xffffffff);
7814 /* Clear status block in ram. */
7815 memset(tnapi
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
7817 if (tnapi
->tx_ring
) {
7818 tg3_set_bdinfo(tp
, txrcb
, tnapi
->tx_desc_mapping
,
7819 (TG3_TX_RING_SIZE
<<
7820 BDINFO_FLAGS_MAXLEN_SHIFT
),
7821 NIC_SRAM_TX_BUFFER_DESC
);
7822 txrcb
+= TG3_BDINFO_SIZE
;
7825 tg3_set_bdinfo(tp
, rxrcb
, tnapi
->rx_rcb_mapping
,
7826 ((tp
->rx_ret_ring_mask
+ 1) <<
7827 BDINFO_FLAGS_MAXLEN_SHIFT
), 0);
7830 rxrcb
+= TG3_BDINFO_SIZE
;
7834 static void tg3_setup_rxbd_thresholds(struct tg3
*tp
)
7836 u32 val
, bdcache_maxcnt
, host_rep_thresh
, nic_rep_thresh
;
7838 if (!tg3_flag(tp
, 5750_PLUS
) ||
7839 tg3_flag(tp
, 5780_CLASS
) ||
7840 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
||
7841 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
)
7842 bdcache_maxcnt
= TG3_SRAM_RX_STD_BDCACHE_SIZE_5700
;
7843 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
7844 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
)
7845 bdcache_maxcnt
= TG3_SRAM_RX_STD_BDCACHE_SIZE_5755
;
7847 bdcache_maxcnt
= TG3_SRAM_RX_STD_BDCACHE_SIZE_5906
;
7849 nic_rep_thresh
= min(bdcache_maxcnt
/ 2, tp
->rx_std_max_post
);
7850 host_rep_thresh
= max_t(u32
, tp
->rx_pending
/ 8, 1);
7852 val
= min(nic_rep_thresh
, host_rep_thresh
);
7853 tw32(RCVBDI_STD_THRESH
, val
);
7855 if (tg3_flag(tp
, 57765_PLUS
))
7856 tw32(STD_REPLENISH_LWM
, bdcache_maxcnt
);
7858 if (!tg3_flag(tp
, JUMBO_CAPABLE
) || tg3_flag(tp
, 5780_CLASS
))
7861 if (!tg3_flag(tp
, 5705_PLUS
))
7862 bdcache_maxcnt
= TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700
;
7864 bdcache_maxcnt
= TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717
;
7866 host_rep_thresh
= max_t(u32
, tp
->rx_jumbo_pending
/ 8, 1);
7868 val
= min(bdcache_maxcnt
/ 2, host_rep_thresh
);
7869 tw32(RCVBDI_JUMBO_THRESH
, val
);
7871 if (tg3_flag(tp
, 57765_PLUS
))
7872 tw32(JMB_REPLENISH_LWM
, bdcache_maxcnt
);
7875 /* tp->lock is held. */
7876 static int tg3_reset_hw(struct tg3
*tp
, int reset_phy
)
7878 u32 val
, rdmac_mode
;
7880 struct tg3_rx_prodring_set
*tpr
= &tp
->napi
[0].prodring
;
7882 tg3_disable_ints(tp
);
7886 tg3_write_sig_pre_reset(tp
, RESET_KIND_INIT
);
7888 if (tg3_flag(tp
, INIT_COMPLETE
))
7889 tg3_abort_hw(tp
, 1);
7891 /* Enable MAC control of LPI */
7892 if (tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
) {
7893 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL
,
7894 TG3_CPMU_EEE_LNKIDL_PCIE_NL0
|
7895 TG3_CPMU_EEE_LNKIDL_UART_IDL
);
7897 tw32_f(TG3_CPMU_EEE_CTRL
,
7898 TG3_CPMU_EEE_CTRL_EXIT_20_1_US
);
7900 val
= TG3_CPMU_EEEMD_ERLY_L1_XIT_DET
|
7901 TG3_CPMU_EEEMD_LPI_IN_TX
|
7902 TG3_CPMU_EEEMD_LPI_IN_RX
|
7903 TG3_CPMU_EEEMD_EEE_ENABLE
;
7905 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5717
)
7906 val
|= TG3_CPMU_EEEMD_SND_IDX_DET_EN
;
7908 if (tg3_flag(tp
, ENABLE_APE
))
7909 val
|= TG3_CPMU_EEEMD_APE_TX_DET_EN
;
7911 tw32_f(TG3_CPMU_EEE_MODE
, val
);
7913 tw32_f(TG3_CPMU_EEE_DBTMR1
,
7914 TG3_CPMU_DBTMR1_PCIEXIT_2047US
|
7915 TG3_CPMU_DBTMR1_LNKIDLE_2047US
);
7917 tw32_f(TG3_CPMU_EEE_DBTMR2
,
7918 TG3_CPMU_DBTMR2_APE_TX_2047US
|
7919 TG3_CPMU_DBTMR2_TXIDXEQ_2047US
);
7925 err
= tg3_chip_reset(tp
);
7929 tg3_write_sig_legacy(tp
, RESET_KIND_INIT
);
7931 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5784_AX
) {
7932 val
= tr32(TG3_CPMU_CTRL
);
7933 val
&= ~(CPMU_CTRL_LINK_AWARE_MODE
| CPMU_CTRL_LINK_IDLE_MODE
);
7934 tw32(TG3_CPMU_CTRL
, val
);
7936 val
= tr32(TG3_CPMU_LSPD_10MB_CLK
);
7937 val
&= ~CPMU_LSPD_10MB_MACCLK_MASK
;
7938 val
|= CPMU_LSPD_10MB_MACCLK_6_25
;
7939 tw32(TG3_CPMU_LSPD_10MB_CLK
, val
);
7941 val
= tr32(TG3_CPMU_LNK_AWARE_PWRMD
);
7942 val
&= ~CPMU_LNK_AWARE_MACCLK_MASK
;
7943 val
|= CPMU_LNK_AWARE_MACCLK_6_25
;
7944 tw32(TG3_CPMU_LNK_AWARE_PWRMD
, val
);
7946 val
= tr32(TG3_CPMU_HST_ACC
);
7947 val
&= ~CPMU_HST_ACC_MACCLK_MASK
;
7948 val
|= CPMU_HST_ACC_MACCLK_6_25
;
7949 tw32(TG3_CPMU_HST_ACC
, val
);
7952 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
) {
7953 val
= tr32(PCIE_PWR_MGMT_THRESH
) & ~PCIE_PWR_MGMT_L1_THRESH_MSK
;
7954 val
|= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN
|
7955 PCIE_PWR_MGMT_L1_THRESH_4MS
;
7956 tw32(PCIE_PWR_MGMT_THRESH
, val
);
7958 val
= tr32(TG3_PCIE_EIDLE_DELAY
) & ~TG3_PCIE_EIDLE_DELAY_MASK
;
7959 tw32(TG3_PCIE_EIDLE_DELAY
, val
| TG3_PCIE_EIDLE_DELAY_13_CLKS
);
7961 tw32(TG3_CORR_ERR_STAT
, TG3_CORR_ERR_STAT_CLEAR
);
7963 val
= tr32(TG3_PCIE_LNKCTL
) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN
;
7964 tw32(TG3_PCIE_LNKCTL
, val
| TG3_PCIE_LNKCTL_L1_PLL_PD_DIS
);
7967 if (tg3_flag(tp
, L1PLLPD_EN
)) {
7968 u32 grc_mode
= tr32(GRC_MODE
);
7970 /* Access the lower 1K of PL PCIE block registers. */
7971 val
= grc_mode
& ~GRC_MODE_PCIE_PORT_MASK
;
7972 tw32(GRC_MODE
, val
| GRC_MODE_PCIE_PL_SEL
);
7974 val
= tr32(TG3_PCIE_TLDLPL_PORT
+ TG3_PCIE_PL_LO_PHYCTL1
);
7975 tw32(TG3_PCIE_TLDLPL_PORT
+ TG3_PCIE_PL_LO_PHYCTL1
,
7976 val
| TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN
);
7978 tw32(GRC_MODE
, grc_mode
);
7981 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
) {
7982 if (tp
->pci_chip_rev_id
== CHIPREV_ID_57765_A0
) {
7983 u32 grc_mode
= tr32(GRC_MODE
);
7985 /* Access the lower 1K of PL PCIE block registers. */
7986 val
= grc_mode
& ~GRC_MODE_PCIE_PORT_MASK
;
7987 tw32(GRC_MODE
, val
| GRC_MODE_PCIE_PL_SEL
);
7989 val
= tr32(TG3_PCIE_TLDLPL_PORT
+
7990 TG3_PCIE_PL_LO_PHYCTL5
);
7991 tw32(TG3_PCIE_TLDLPL_PORT
+ TG3_PCIE_PL_LO_PHYCTL5
,
7992 val
| TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ
);
7994 tw32(GRC_MODE
, grc_mode
);
7997 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_57765_AX
) {
7998 u32 grc_mode
= tr32(GRC_MODE
);
8000 /* Access the lower 1K of DL PCIE block registers. */
8001 val
= grc_mode
& ~GRC_MODE_PCIE_PORT_MASK
;
8002 tw32(GRC_MODE
, val
| GRC_MODE_PCIE_DL_SEL
);
8004 val
= tr32(TG3_PCIE_TLDLPL_PORT
+
8005 TG3_PCIE_DL_LO_FTSMAX
);
8006 val
&= ~TG3_PCIE_DL_LO_FTSMAX_MSK
;
8007 tw32(TG3_PCIE_TLDLPL_PORT
+ TG3_PCIE_DL_LO_FTSMAX
,
8008 val
| TG3_PCIE_DL_LO_FTSMAX_VAL
);
8010 tw32(GRC_MODE
, grc_mode
);
8013 val
= tr32(TG3_CPMU_LSPD_10MB_CLK
);
8014 val
&= ~CPMU_LSPD_10MB_MACCLK_MASK
;
8015 val
|= CPMU_LSPD_10MB_MACCLK_6_25
;
8016 tw32(TG3_CPMU_LSPD_10MB_CLK
, val
);
8019 /* This works around an issue with Athlon chipsets on
8020 * B3 tigon3 silicon. This bit has no effect on any
8021 * other revision. But do not set this on PCI Express
8022 * chips and don't even touch the clocks if the CPMU is present.
8024 if (!tg3_flag(tp
, CPMU_PRESENT
)) {
8025 if (!tg3_flag(tp
, PCI_EXPRESS
))
8026 tp
->pci_clock_ctrl
|= CLOCK_CTRL_DELAY_PCI_GRANT
;
8027 tw32_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
);
8030 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
&&
8031 tg3_flag(tp
, PCIX_MODE
)) {
8032 val
= tr32(TG3PCI_PCISTATE
);
8033 val
|= PCISTATE_RETRY_SAME_DMA
;
8034 tw32(TG3PCI_PCISTATE
, val
);
8037 if (tg3_flag(tp
, ENABLE_APE
)) {
8038 /* Allow reads and writes to the
8039 * APE register and memory space.
8041 val
= tr32(TG3PCI_PCISTATE
);
8042 val
|= PCISTATE_ALLOW_APE_CTLSPC_WR
|
8043 PCISTATE_ALLOW_APE_SHMEM_WR
|
8044 PCISTATE_ALLOW_APE_PSPACE_WR
;
8045 tw32(TG3PCI_PCISTATE
, val
);
8048 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5704_BX
) {
8049 /* Enable some hw fixes. */
8050 val
= tr32(TG3PCI_MSI_DATA
);
8051 val
|= (1 << 26) | (1 << 28) | (1 << 29);
8052 tw32(TG3PCI_MSI_DATA
, val
);
8055 /* Descriptor ring init may make accesses to the
8056 * NIC SRAM area to setup the TX descriptors, so we
8057 * can only do this after the hardware has been
8058 * successfully reset.
8060 err
= tg3_init_rings(tp
);
8064 if (tg3_flag(tp
, 57765_PLUS
)) {
8065 val
= tr32(TG3PCI_DMA_RW_CTRL
) &
8066 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT
;
8067 if (tp
->pci_chip_rev_id
== CHIPREV_ID_57765_A0
)
8068 val
&= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK
;
8069 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_57765
&&
8070 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5717
)
8071 val
|= DMA_RWCTRL_TAGGED_STAT_WA
;
8072 tw32(TG3PCI_DMA_RW_CTRL
, val
| tp
->dma_rwctrl
);
8073 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5784
&&
8074 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5761
) {
8075 /* This value is determined during the probe time DMA
8076 * engine test, tg3_test_dma.
8078 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
8081 tp
->grc_mode
&= ~(GRC_MODE_HOST_SENDBDS
|
8082 GRC_MODE_4X_NIC_SEND_RINGS
|
8083 GRC_MODE_NO_TX_PHDR_CSUM
|
8084 GRC_MODE_NO_RX_PHDR_CSUM
);
8085 tp
->grc_mode
|= GRC_MODE_HOST_SENDBDS
;
8087 /* Pseudo-header checksum is done by hardware logic and not
8088 * the offload processers, so make the chip do the pseudo-
8089 * header checksums on receive. For transmit it is more
8090 * convenient to do the pseudo-header checksum in software
8091 * as Linux does that on transmit for us in all cases.
8093 tp
->grc_mode
|= GRC_MODE_NO_TX_PHDR_CSUM
;
8097 (GRC_MODE_IRQ_ON_MAC_ATTN
| GRC_MODE_HOST_STACKUP
));
8099 /* Setup the timer prescalar register. Clock is always 66Mhz. */
8100 val
= tr32(GRC_MISC_CFG
);
8102 val
|= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT
);
8103 tw32(GRC_MISC_CFG
, val
);
8105 /* Initialize MBUF/DESC pool. */
8106 if (tg3_flag(tp
, 5750_PLUS
)) {
8108 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5705
) {
8109 tw32(BUFMGR_MB_POOL_ADDR
, NIC_SRAM_MBUF_POOL_BASE
);
8110 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
)
8111 tw32(BUFMGR_MB_POOL_SIZE
, NIC_SRAM_MBUF_POOL_SIZE64
);
8113 tw32(BUFMGR_MB_POOL_SIZE
, NIC_SRAM_MBUF_POOL_SIZE96
);
8114 tw32(BUFMGR_DMA_DESC_POOL_ADDR
, NIC_SRAM_DMA_DESC_POOL_BASE
);
8115 tw32(BUFMGR_DMA_DESC_POOL_SIZE
, NIC_SRAM_DMA_DESC_POOL_SIZE
);
8116 } else if (tg3_flag(tp
, TSO_CAPABLE
)) {
8119 fw_len
= tp
->fw_len
;
8120 fw_len
= (fw_len
+ (0x80 - 1)) & ~(0x80 - 1);
8121 tw32(BUFMGR_MB_POOL_ADDR
,
8122 NIC_SRAM_MBUF_POOL_BASE5705
+ fw_len
);
8123 tw32(BUFMGR_MB_POOL_SIZE
,
8124 NIC_SRAM_MBUF_POOL_SIZE5705
- fw_len
- 0xa00);
8127 if (tp
->dev
->mtu
<= ETH_DATA_LEN
) {
8128 tw32(BUFMGR_MB_RDMA_LOW_WATER
,
8129 tp
->bufmgr_config
.mbuf_read_dma_low_water
);
8130 tw32(BUFMGR_MB_MACRX_LOW_WATER
,
8131 tp
->bufmgr_config
.mbuf_mac_rx_low_water
);
8132 tw32(BUFMGR_MB_HIGH_WATER
,
8133 tp
->bufmgr_config
.mbuf_high_water
);
8135 tw32(BUFMGR_MB_RDMA_LOW_WATER
,
8136 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
);
8137 tw32(BUFMGR_MB_MACRX_LOW_WATER
,
8138 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
);
8139 tw32(BUFMGR_MB_HIGH_WATER
,
8140 tp
->bufmgr_config
.mbuf_high_water_jumbo
);
8142 tw32(BUFMGR_DMA_LOW_WATER
,
8143 tp
->bufmgr_config
.dma_low_water
);
8144 tw32(BUFMGR_DMA_HIGH_WATER
,
8145 tp
->bufmgr_config
.dma_high_water
);
8147 val
= BUFMGR_MODE_ENABLE
| BUFMGR_MODE_ATTN_ENABLE
;
8148 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
)
8149 val
|= BUFMGR_MODE_NO_TX_UNDERRUN
;
8150 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
||
8151 tp
->pci_chip_rev_id
== CHIPREV_ID_5719_A0
||
8152 tp
->pci_chip_rev_id
== CHIPREV_ID_5720_A0
)
8153 val
|= BUFMGR_MODE_MBLOW_ATTN_ENAB
;
8154 tw32(BUFMGR_MODE
, val
);
8155 for (i
= 0; i
< 2000; i
++) {
8156 if (tr32(BUFMGR_MODE
) & BUFMGR_MODE_ENABLE
)
8161 netdev_err(tp
->dev
, "%s cannot enable BUFMGR\n", __func__
);
8165 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5906_A1
)
8166 tw32(ISO_PKT_TX
, (tr32(ISO_PKT_TX
) & ~0x3) | 0x2);
8168 tg3_setup_rxbd_thresholds(tp
);
8170 /* Initialize TG3_BDINFO's at:
8171 * RCVDBDI_STD_BD: standard eth size rx ring
8172 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8173 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8176 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8177 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8178 * ring attribute flags
8179 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8181 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8182 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8184 * The size of each ring is fixed in the firmware, but the location is
8187 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
8188 ((u64
) tpr
->rx_std_mapping
>> 32));
8189 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
8190 ((u64
) tpr
->rx_std_mapping
& 0xffffffff));
8191 if (!tg3_flag(tp
, 5717_PLUS
))
8192 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_NIC_ADDR
,
8193 NIC_SRAM_RX_BUFFER_DESC
);
8195 /* Disable the mini ring */
8196 if (!tg3_flag(tp
, 5705_PLUS
))
8197 tw32(RCVDBDI_MINI_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
8198 BDINFO_FLAGS_DISABLED
);
8200 /* Program the jumbo buffer descriptor ring control
8201 * blocks on those devices that have them.
8203 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
||
8204 (tg3_flag(tp
, JUMBO_CAPABLE
) && !tg3_flag(tp
, 5780_CLASS
))) {
8206 if (tg3_flag(tp
, JUMBO_RING_ENABLE
)) {
8207 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
8208 ((u64
) tpr
->rx_jmb_mapping
>> 32));
8209 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
8210 ((u64
) tpr
->rx_jmb_mapping
& 0xffffffff));
8211 val
= TG3_RX_JMB_RING_SIZE(tp
) <<
8212 BDINFO_FLAGS_MAXLEN_SHIFT
;
8213 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
8214 val
| BDINFO_FLAGS_USE_EXT_RECV
);
8215 if (!tg3_flag(tp
, USE_JUMBO_BDFLAG
) ||
8216 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
)
8217 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_NIC_ADDR
,
8218 NIC_SRAM_RX_JUMBO_BUFFER_DESC
);
8220 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
8221 BDINFO_FLAGS_DISABLED
);
8224 if (tg3_flag(tp
, 57765_PLUS
)) {
8225 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
)
8226 val
= TG3_RX_STD_MAX_SIZE_5700
;
8228 val
= TG3_RX_STD_MAX_SIZE_5717
;
8229 val
<<= BDINFO_FLAGS_MAXLEN_SHIFT
;
8230 val
|= (TG3_RX_STD_DMA_SZ
<< 2);
8232 val
= TG3_RX_STD_DMA_SZ
<< BDINFO_FLAGS_MAXLEN_SHIFT
;
8234 val
= TG3_RX_STD_MAX_SIZE_5700
<< BDINFO_FLAGS_MAXLEN_SHIFT
;
8236 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_MAXLEN_FLAGS
, val
);
8238 tpr
->rx_std_prod_idx
= tp
->rx_pending
;
8239 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
, tpr
->rx_std_prod_idx
);
8241 tpr
->rx_jmb_prod_idx
=
8242 tg3_flag(tp
, JUMBO_RING_ENABLE
) ? tp
->rx_jumbo_pending
: 0;
8243 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG
, tpr
->rx_jmb_prod_idx
);
8245 tg3_rings_reset(tp
);
8247 /* Initialize MAC address and backoff seed. */
8248 __tg3_set_mac_addr(tp
, 0);
8250 /* MTU + ethernet header + FCS + optional VLAN tag */
8251 tw32(MAC_RX_MTU_SIZE
,
8252 tp
->dev
->mtu
+ ETH_HLEN
+ ETH_FCS_LEN
+ VLAN_HLEN
);
8254 /* The slot time is changed by tg3_setup_phy if we
8255 * run at gigabit with half duplex.
8257 val
= (2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
8258 (6 << TX_LENGTHS_IPG_SHIFT
) |
8259 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
);
8261 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
)
8262 val
|= tr32(MAC_TX_LENGTHS
) &
8263 (TX_LENGTHS_JMB_FRM_LEN_MSK
|
8264 TX_LENGTHS_CNT_DWN_VAL_MSK
);
8266 tw32(MAC_TX_LENGTHS
, val
);
8268 /* Receive rules. */
8269 tw32(MAC_RCV_RULE_CFG
, RCV_RULE_CFG_DEFAULT_CLASS
);
8270 tw32(RCVLPC_CONFIG
, 0x0181);
8272 /* Calculate RDMAC_MODE setting early, we need it to determine
8273 * the RCVLPC_STATE_ENABLE mask.
8275 rdmac_mode
= (RDMAC_MODE_ENABLE
| RDMAC_MODE_TGTABORT_ENAB
|
8276 RDMAC_MODE_MSTABORT_ENAB
| RDMAC_MODE_PARITYERR_ENAB
|
8277 RDMAC_MODE_ADDROFLOW_ENAB
| RDMAC_MODE_FIFOOFLOW_ENAB
|
8278 RDMAC_MODE_FIFOURUN_ENAB
| RDMAC_MODE_FIFOOREAD_ENAB
|
8279 RDMAC_MODE_LNGREAD_ENAB
);
8281 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
)
8282 rdmac_mode
|= RDMAC_MODE_MULT_DMA_RD_DIS
;
8284 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
8285 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
||
8286 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
)
8287 rdmac_mode
|= RDMAC_MODE_BD_SBD_CRPT_ENAB
|
8288 RDMAC_MODE_MBUF_RBD_CRPT_ENAB
|
8289 RDMAC_MODE_MBUF_SBD_CRPT_ENAB
;
8291 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
8292 tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A0
) {
8293 if (tg3_flag(tp
, TSO_CAPABLE
) &&
8294 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
8295 rdmac_mode
|= RDMAC_MODE_FIFO_SIZE_128
;
8296 } else if (!(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
) &&
8297 !tg3_flag(tp
, IS_5788
)) {
8298 rdmac_mode
|= RDMAC_MODE_FIFO_LONG_BURST
;
8302 if (tg3_flag(tp
, PCI_EXPRESS
))
8303 rdmac_mode
|= RDMAC_MODE_FIFO_LONG_BURST
;
8305 if (tg3_flag(tp
, HW_TSO_1
) ||
8306 tg3_flag(tp
, HW_TSO_2
) ||
8307 tg3_flag(tp
, HW_TSO_3
))
8308 rdmac_mode
|= RDMAC_MODE_IPV4_LSO_EN
;
8310 if (tg3_flag(tp
, 57765_PLUS
) ||
8311 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
||
8312 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
)
8313 rdmac_mode
|= RDMAC_MODE_IPV6_LSO_EN
;
8315 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
)
8316 rdmac_mode
|= tr32(RDMAC_MODE
) & RDMAC_MODE_H2BNC_VLAN_DET
;
8318 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
||
8319 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
8320 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
||
8321 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
||
8322 tg3_flag(tp
, 57765_PLUS
)) {
8323 val
= tr32(TG3_RDMA_RSRVCTRL_REG
);
8324 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
||
8325 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
) {
8326 val
&= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK
|
8327 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK
|
8328 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK
);
8329 val
|= TG3_RDMA_RSRVCTRL_TXMRGN_320B
|
8330 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K
|
8331 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K
;
8333 tw32(TG3_RDMA_RSRVCTRL_REG
,
8334 val
| TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX
);
8337 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
||
8338 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
) {
8339 val
= tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL
);
8340 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL
, val
|
8341 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K
|
8342 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K
);
8345 /* Receive/send statistics. */
8346 if (tg3_flag(tp
, 5750_PLUS
)) {
8347 val
= tr32(RCVLPC_STATS_ENABLE
);
8348 val
&= ~RCVLPC_STATSENAB_DACK_FIX
;
8349 tw32(RCVLPC_STATS_ENABLE
, val
);
8350 } else if ((rdmac_mode
& RDMAC_MODE_FIFO_SIZE_128
) &&
8351 tg3_flag(tp
, TSO_CAPABLE
)) {
8352 val
= tr32(RCVLPC_STATS_ENABLE
);
8353 val
&= ~RCVLPC_STATSENAB_LNGBRST_RFIX
;
8354 tw32(RCVLPC_STATS_ENABLE
, val
);
8356 tw32(RCVLPC_STATS_ENABLE
, 0xffffff);
8358 tw32(RCVLPC_STATSCTRL
, RCVLPC_STATSCTRL_ENABLE
);
8359 tw32(SNDDATAI_STATSENAB
, 0xffffff);
8360 tw32(SNDDATAI_STATSCTRL
,
8361 (SNDDATAI_SCTRL_ENABLE
|
8362 SNDDATAI_SCTRL_FASTUPD
));
8364 /* Setup host coalescing engine. */
8365 tw32(HOSTCC_MODE
, 0);
8366 for (i
= 0; i
< 2000; i
++) {
8367 if (!(tr32(HOSTCC_MODE
) & HOSTCC_MODE_ENABLE
))
8372 __tg3_set_coalesce(tp
, &tp
->coal
);
8374 if (!tg3_flag(tp
, 5705_PLUS
)) {
8375 /* Status/statistics block address. See tg3_timer,
8376 * the tg3_periodic_fetch_stats call there, and
8377 * tg3_get_stats to see how this works for 5705/5750 chips.
8379 tw32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
8380 ((u64
) tp
->stats_mapping
>> 32));
8381 tw32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
8382 ((u64
) tp
->stats_mapping
& 0xffffffff));
8383 tw32(HOSTCC_STATS_BLK_NIC_ADDR
, NIC_SRAM_STATS_BLK
);
8385 tw32(HOSTCC_STATUS_BLK_NIC_ADDR
, NIC_SRAM_STATUS_BLK
);
8387 /* Clear statistics and status block memory areas */
8388 for (i
= NIC_SRAM_STATS_BLK
;
8389 i
< NIC_SRAM_STATUS_BLK
+ TG3_HW_STATUS_SIZE
;
8391 tg3_write_mem(tp
, i
, 0);
8396 tw32(HOSTCC_MODE
, HOSTCC_MODE_ENABLE
| tp
->coalesce_mode
);
8398 tw32(RCVCC_MODE
, RCVCC_MODE_ENABLE
| RCVCC_MODE_ATTN_ENABLE
);
8399 tw32(RCVLPC_MODE
, RCVLPC_MODE_ENABLE
);
8400 if (!tg3_flag(tp
, 5705_PLUS
))
8401 tw32(RCVLSC_MODE
, RCVLSC_MODE_ENABLE
| RCVLSC_MODE_ATTN_ENABLE
);
8403 if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) {
8404 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
8405 /* reset to prevent losing 1st rx packet intermittently */
8406 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
8410 if (tg3_flag(tp
, ENABLE_APE
))
8411 tp
->mac_mode
= MAC_MODE_APE_TX_EN
| MAC_MODE_APE_RX_EN
;
8414 tp
->mac_mode
|= MAC_MODE_TXSTAT_ENABLE
| MAC_MODE_RXSTAT_ENABLE
|
8415 MAC_MODE_TDE_ENABLE
| MAC_MODE_RDE_ENABLE
| MAC_MODE_FHDE_ENABLE
;
8416 if (!tg3_flag(tp
, 5705_PLUS
) &&
8417 !(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
8418 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
)
8419 tp
->mac_mode
|= MAC_MODE_LINK_POLARITY
;
8420 tw32_f(MAC_MODE
, tp
->mac_mode
| MAC_MODE_RXSTAT_CLEAR
| MAC_MODE_TXSTAT_CLEAR
);
8423 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8424 * If TG3_FLAG_IS_NIC is zero, we should read the
8425 * register to preserve the GPIO settings for LOMs. The GPIOs,
8426 * whether used as inputs or outputs, are set by boot code after
8429 if (!tg3_flag(tp
, IS_NIC
)) {
8432 gpio_mask
= GRC_LCLCTRL_GPIO_OE0
| GRC_LCLCTRL_GPIO_OE1
|
8433 GRC_LCLCTRL_GPIO_OE2
| GRC_LCLCTRL_GPIO_OUTPUT0
|
8434 GRC_LCLCTRL_GPIO_OUTPUT1
| GRC_LCLCTRL_GPIO_OUTPUT2
;
8436 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
)
8437 gpio_mask
|= GRC_LCLCTRL_GPIO_OE3
|
8438 GRC_LCLCTRL_GPIO_OUTPUT3
;
8440 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
)
8441 gpio_mask
|= GRC_LCLCTRL_GPIO_UART_SEL
;
8443 tp
->grc_local_ctrl
&= ~gpio_mask
;
8444 tp
->grc_local_ctrl
|= tr32(GRC_LOCAL_CTRL
) & gpio_mask
;
8446 /* GPIO1 must be driven high for eeprom write protect */
8447 if (tg3_flag(tp
, EEPROM_WRITE_PROT
))
8448 tp
->grc_local_ctrl
|= (GRC_LCLCTRL_GPIO_OE1
|
8449 GRC_LCLCTRL_GPIO_OUTPUT1
);
8451 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
8454 if (tg3_flag(tp
, USING_MSIX
) && tp
->irq_cnt
> 1) {
8455 val
= tr32(MSGINT_MODE
);
8456 val
|= MSGINT_MODE_MULTIVEC_EN
| MSGINT_MODE_ENABLE
;
8457 tw32(MSGINT_MODE
, val
);
8460 if (!tg3_flag(tp
, 5705_PLUS
)) {
8461 tw32_f(DMAC_MODE
, DMAC_MODE_ENABLE
);
8465 val
= (WDMAC_MODE_ENABLE
| WDMAC_MODE_TGTABORT_ENAB
|
8466 WDMAC_MODE_MSTABORT_ENAB
| WDMAC_MODE_PARITYERR_ENAB
|
8467 WDMAC_MODE_ADDROFLOW_ENAB
| WDMAC_MODE_FIFOOFLOW_ENAB
|
8468 WDMAC_MODE_FIFOURUN_ENAB
| WDMAC_MODE_FIFOOREAD_ENAB
|
8469 WDMAC_MODE_LNGREAD_ENAB
);
8471 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
8472 tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A0
) {
8473 if (tg3_flag(tp
, TSO_CAPABLE
) &&
8474 (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A1
||
8475 tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A2
)) {
8477 } else if (!(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
) &&
8478 !tg3_flag(tp
, IS_5788
)) {
8479 val
|= WDMAC_MODE_RX_ACCEL
;
8483 /* Enable host coalescing bug fix */
8484 if (tg3_flag(tp
, 5755_PLUS
))
8485 val
|= WDMAC_MODE_STATUS_TAG_FIX
;
8487 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
)
8488 val
|= WDMAC_MODE_BURST_ALL_DATA
;
8490 tw32_f(WDMAC_MODE
, val
);
8493 if (tg3_flag(tp
, PCIX_MODE
)) {
8496 pci_read_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
8498 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
) {
8499 pcix_cmd
&= ~PCI_X_CMD_MAX_READ
;
8500 pcix_cmd
|= PCI_X_CMD_READ_2K
;
8501 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
8502 pcix_cmd
&= ~(PCI_X_CMD_MAX_SPLIT
| PCI_X_CMD_MAX_READ
);
8503 pcix_cmd
|= PCI_X_CMD_READ_2K
;
8505 pci_write_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
8509 tw32_f(RDMAC_MODE
, rdmac_mode
);
8512 tw32(RCVDCC_MODE
, RCVDCC_MODE_ENABLE
| RCVDCC_MODE_ATTN_ENABLE
);
8513 if (!tg3_flag(tp
, 5705_PLUS
))
8514 tw32(MBFREE_MODE
, MBFREE_MODE_ENABLE
);
8516 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
)
8518 SNDDATAC_MODE_ENABLE
| SNDDATAC_MODE_CDELAY
);
8520 tw32(SNDDATAC_MODE
, SNDDATAC_MODE_ENABLE
);
8522 tw32(SNDBDC_MODE
, SNDBDC_MODE_ENABLE
| SNDBDC_MODE_ATTN_ENABLE
);
8523 tw32(RCVBDI_MODE
, RCVBDI_MODE_ENABLE
| RCVBDI_MODE_RCB_ATTN_ENAB
);
8524 val
= RCVDBDI_MODE_ENABLE
| RCVDBDI_MODE_INV_RING_SZ
;
8525 if (tg3_flag(tp
, LRG_PROD_RING_CAP
))
8526 val
|= RCVDBDI_MODE_LRG_RING_SZ
;
8527 tw32(RCVDBDI_MODE
, val
);
8528 tw32(SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
);
8529 if (tg3_flag(tp
, HW_TSO_1
) ||
8530 tg3_flag(tp
, HW_TSO_2
) ||
8531 tg3_flag(tp
, HW_TSO_3
))
8532 tw32(SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
| 0x8);
8533 val
= SNDBDI_MODE_ENABLE
| SNDBDI_MODE_ATTN_ENABLE
;
8534 if (tg3_flag(tp
, ENABLE_TSS
))
8535 val
|= SNDBDI_MODE_MULTI_TXQ_EN
;
8536 tw32(SNDBDI_MODE
, val
);
8537 tw32(SNDBDS_MODE
, SNDBDS_MODE_ENABLE
| SNDBDS_MODE_ATTN_ENABLE
);
8539 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
) {
8540 err
= tg3_load_5701_a0_firmware_fix(tp
);
8545 if (tg3_flag(tp
, TSO_CAPABLE
)) {
8546 err
= tg3_load_tso_firmware(tp
);
8551 tp
->tx_mode
= TX_MODE_ENABLE
;
8553 if (tg3_flag(tp
, 5755_PLUS
) ||
8554 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
8555 tp
->tx_mode
|= TX_MODE_MBUF_LOCKUP_FIX
;
8557 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
) {
8558 val
= TX_MODE_JMB_FRM_LEN
| TX_MODE_CNT_DN_MODE
;
8559 tp
->tx_mode
&= ~val
;
8560 tp
->tx_mode
|= tr32(MAC_TX_MODE
) & val
;
8563 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
8566 if (tg3_flag(tp
, ENABLE_RSS
)) {
8567 u32 reg
= MAC_RSS_INDIR_TBL_0
;
8568 u8
*ent
= (u8
*)&val
;
8570 /* Setup the indirection table */
8571 for (i
= 0; i
< TG3_RSS_INDIR_TBL_SIZE
; i
++) {
8572 int idx
= i
% sizeof(val
);
8574 ent
[idx
] = i
% (tp
->irq_cnt
- 1);
8575 if (idx
== sizeof(val
) - 1) {
8581 /* Setup the "secret" hash key. */
8582 tw32(MAC_RSS_HASH_KEY_0
, 0x5f865437);
8583 tw32(MAC_RSS_HASH_KEY_1
, 0xe4ac62cc);
8584 tw32(MAC_RSS_HASH_KEY_2
, 0x50103a45);
8585 tw32(MAC_RSS_HASH_KEY_3
, 0x36621985);
8586 tw32(MAC_RSS_HASH_KEY_4
, 0xbf14c0e8);
8587 tw32(MAC_RSS_HASH_KEY_5
, 0x1bc27a1e);
8588 tw32(MAC_RSS_HASH_KEY_6
, 0x84f4b556);
8589 tw32(MAC_RSS_HASH_KEY_7
, 0x094ea6fe);
8590 tw32(MAC_RSS_HASH_KEY_8
, 0x7dda01e7);
8591 tw32(MAC_RSS_HASH_KEY_9
, 0xc04d7481);
8594 tp
->rx_mode
= RX_MODE_ENABLE
;
8595 if (tg3_flag(tp
, 5755_PLUS
))
8596 tp
->rx_mode
|= RX_MODE_IPV6_CSUM_ENABLE
;
8598 if (tg3_flag(tp
, ENABLE_RSS
))
8599 tp
->rx_mode
|= RX_MODE_RSS_ENABLE
|
8600 RX_MODE_RSS_ITBL_HASH_BITS_7
|
8601 RX_MODE_RSS_IPV6_HASH_EN
|
8602 RX_MODE_RSS_TCP_IPV6_HASH_EN
|
8603 RX_MODE_RSS_IPV4_HASH_EN
|
8604 RX_MODE_RSS_TCP_IPV4_HASH_EN
;
8606 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
8609 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
8611 tw32(MAC_MI_STAT
, MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
8612 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
8613 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
8616 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
8619 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
8620 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) &&
8621 !(tp
->phy_flags
& TG3_PHYFLG_SERDES_PREEMPHASIS
)) {
8622 /* Set drive transmission level to 1.2V */
8623 /* only if the signal pre-emphasis bit is not set */
8624 val
= tr32(MAC_SERDES_CFG
);
8627 tw32(MAC_SERDES_CFG
, val
);
8629 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5703_A1
)
8630 tw32(MAC_SERDES_CFG
, 0x616000);
8633 /* Prevent chip from dropping frames when flow control
8636 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
)
8640 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME
, val
);
8642 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
&&
8643 (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)) {
8644 /* Use hardware link auto-negotiation */
8645 tg3_flag_set(tp
, HW_AUTONEG
);
8648 if ((tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) &&
8649 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) {
8652 tmp
= tr32(SERDES_RX_CTRL
);
8653 tw32(SERDES_RX_CTRL
, tmp
| SERDES_RX_SIG_DETECT
);
8654 tp
->grc_local_ctrl
&= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT
;
8655 tp
->grc_local_ctrl
|= GRC_LCLCTRL_USE_SIG_DETECT
;
8656 tw32(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
8659 if (!tg3_flag(tp
, USE_PHYLIB
)) {
8660 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) {
8661 tp
->phy_flags
&= ~TG3_PHYFLG_IS_LOW_POWER
;
8662 tp
->link_config
.speed
= tp
->link_config
.orig_speed
;
8663 tp
->link_config
.duplex
= tp
->link_config
.orig_duplex
;
8664 tp
->link_config
.autoneg
= tp
->link_config
.orig_autoneg
;
8667 err
= tg3_setup_phy(tp
, 0);
8671 if (!(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
8672 !(tp
->phy_flags
& TG3_PHYFLG_IS_FET
)) {
8675 /* Clear CRC stats. */
8676 if (!tg3_readphy(tp
, MII_TG3_TEST1
, &tmp
)) {
8677 tg3_writephy(tp
, MII_TG3_TEST1
,
8678 tmp
| MII_TG3_TEST1_CRC_EN
);
8679 tg3_readphy(tp
, MII_TG3_RXR_COUNTERS
, &tmp
);
8684 __tg3_set_rx_mode(tp
->dev
);
8686 /* Initialize receive rules. */
8687 tw32(MAC_RCV_RULE_0
, 0xc2000000 & RCV_RULE_DISABLE_MASK
);
8688 tw32(MAC_RCV_VALUE_0
, 0xffffffff & RCV_RULE_DISABLE_MASK
);
8689 tw32(MAC_RCV_RULE_1
, 0x86000004 & RCV_RULE_DISABLE_MASK
);
8690 tw32(MAC_RCV_VALUE_1
, 0xffffffff & RCV_RULE_DISABLE_MASK
);
8692 if (tg3_flag(tp
, 5705_PLUS
) && !tg3_flag(tp
, 5780_CLASS
))
8696 if (tg3_flag(tp
, ENABLE_ASF
))
8700 tw32(MAC_RCV_RULE_15
, 0); tw32(MAC_RCV_VALUE_15
, 0);
8702 tw32(MAC_RCV_RULE_14
, 0); tw32(MAC_RCV_VALUE_14
, 0);
8704 tw32(MAC_RCV_RULE_13
, 0); tw32(MAC_RCV_VALUE_13
, 0);
8706 tw32(MAC_RCV_RULE_12
, 0); tw32(MAC_RCV_VALUE_12
, 0);
8708 tw32(MAC_RCV_RULE_11
, 0); tw32(MAC_RCV_VALUE_11
, 0);
8710 tw32(MAC_RCV_RULE_10
, 0); tw32(MAC_RCV_VALUE_10
, 0);
8712 tw32(MAC_RCV_RULE_9
, 0); tw32(MAC_RCV_VALUE_9
, 0);
8714 tw32(MAC_RCV_RULE_8
, 0); tw32(MAC_RCV_VALUE_8
, 0);
8716 tw32(MAC_RCV_RULE_7
, 0); tw32(MAC_RCV_VALUE_7
, 0);
8718 tw32(MAC_RCV_RULE_6
, 0); tw32(MAC_RCV_VALUE_6
, 0);
8720 tw32(MAC_RCV_RULE_5
, 0); tw32(MAC_RCV_VALUE_5
, 0);
8722 tw32(MAC_RCV_RULE_4
, 0); tw32(MAC_RCV_VALUE_4
, 0);
8724 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8726 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8734 if (tg3_flag(tp
, ENABLE_APE
))
8735 /* Write our heartbeat update interval to APE. */
8736 tg3_ape_write32(tp
, TG3_APE_HOST_HEARTBEAT_INT_MS
,
8737 APE_HOST_HEARTBEAT_INT_DISABLE
);
8739 tg3_write_sig_post_reset(tp
, RESET_KIND_INIT
);
8744 /* Called at device open time to get the chip ready for
8745 * packet processing. Invoked with tp->lock held.
8747 static int tg3_init_hw(struct tg3
*tp
, int reset_phy
)
8749 tg3_switch_clocks(tp
);
8751 tw32(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
8753 return tg3_reset_hw(tp
, reset_phy
);
/* Add a 32-bit hardware counter register (REG) into a 64-bit split
 * (high/low) statistic, carrying into .high when .low wraps.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
8763 static void tg3_periodic_fetch_stats(struct tg3
*tp
)
8765 struct tg3_hw_stats
*sp
= tp
->hw_stats
;
8767 if (!netif_carrier_ok(tp
->dev
))
8770 TG3_STAT_ADD32(&sp
->tx_octets
, MAC_TX_STATS_OCTETS
);
8771 TG3_STAT_ADD32(&sp
->tx_collisions
, MAC_TX_STATS_COLLISIONS
);
8772 TG3_STAT_ADD32(&sp
->tx_xon_sent
, MAC_TX_STATS_XON_SENT
);
8773 TG3_STAT_ADD32(&sp
->tx_xoff_sent
, MAC_TX_STATS_XOFF_SENT
);
8774 TG3_STAT_ADD32(&sp
->tx_mac_errors
, MAC_TX_STATS_MAC_ERRORS
);
8775 TG3_STAT_ADD32(&sp
->tx_single_collisions
, MAC_TX_STATS_SINGLE_COLLISIONS
);
8776 TG3_STAT_ADD32(&sp
->tx_mult_collisions
, MAC_TX_STATS_MULT_COLLISIONS
);
8777 TG3_STAT_ADD32(&sp
->tx_deferred
, MAC_TX_STATS_DEFERRED
);
8778 TG3_STAT_ADD32(&sp
->tx_excessive_collisions
, MAC_TX_STATS_EXCESSIVE_COL
);
8779 TG3_STAT_ADD32(&sp
->tx_late_collisions
, MAC_TX_STATS_LATE_COL
);
8780 TG3_STAT_ADD32(&sp
->tx_ucast_packets
, MAC_TX_STATS_UCAST
);
8781 TG3_STAT_ADD32(&sp
->tx_mcast_packets
, MAC_TX_STATS_MCAST
);
8782 TG3_STAT_ADD32(&sp
->tx_bcast_packets
, MAC_TX_STATS_BCAST
);
8784 TG3_STAT_ADD32(&sp
->rx_octets
, MAC_RX_STATS_OCTETS
);
8785 TG3_STAT_ADD32(&sp
->rx_fragments
, MAC_RX_STATS_FRAGMENTS
);
8786 TG3_STAT_ADD32(&sp
->rx_ucast_packets
, MAC_RX_STATS_UCAST
);
8787 TG3_STAT_ADD32(&sp
->rx_mcast_packets
, MAC_RX_STATS_MCAST
);
8788 TG3_STAT_ADD32(&sp
->rx_bcast_packets
, MAC_RX_STATS_BCAST
);
8789 TG3_STAT_ADD32(&sp
->rx_fcs_errors
, MAC_RX_STATS_FCS_ERRORS
);
8790 TG3_STAT_ADD32(&sp
->rx_align_errors
, MAC_RX_STATS_ALIGN_ERRORS
);
8791 TG3_STAT_ADD32(&sp
->rx_xon_pause_rcvd
, MAC_RX_STATS_XON_PAUSE_RECVD
);
8792 TG3_STAT_ADD32(&sp
->rx_xoff_pause_rcvd
, MAC_RX_STATS_XOFF_PAUSE_RECVD
);
8793 TG3_STAT_ADD32(&sp
->rx_mac_ctrl_rcvd
, MAC_RX_STATS_MAC_CTRL_RECVD
);
8794 TG3_STAT_ADD32(&sp
->rx_xoff_entered
, MAC_RX_STATS_XOFF_ENTERED
);
8795 TG3_STAT_ADD32(&sp
->rx_frame_too_long_errors
, MAC_RX_STATS_FRAME_TOO_LONG
);
8796 TG3_STAT_ADD32(&sp
->rx_jabbers
, MAC_RX_STATS_JABBERS
);
8797 TG3_STAT_ADD32(&sp
->rx_undersize_packets
, MAC_RX_STATS_UNDERSIZE
);
8799 TG3_STAT_ADD32(&sp
->rxbds_empty
, RCVLPC_NO_RCV_BD_CNT
);
8800 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5717
&&
8801 tp
->pci_chip_rev_id
!= CHIPREV_ID_5719_A0
&&
8802 tp
->pci_chip_rev_id
!= CHIPREV_ID_5720_A0
) {
8803 TG3_STAT_ADD32(&sp
->rx_discards
, RCVLPC_IN_DISCARDS_CNT
);
8805 u32 val
= tr32(HOSTCC_FLOW_ATTN
);
8806 val
= (val
& HOSTCC_FLOW_ATTN_MBUF_LWM
) ? 1 : 0;
8808 tw32(HOSTCC_FLOW_ATTN
, HOSTCC_FLOW_ATTN_MBUF_LWM
);
8809 sp
->rx_discards
.low
+= val
;
8810 if (sp
->rx_discards
.low
< val
)
8811 sp
->rx_discards
.high
+= 1;
8813 sp
->mbuf_lwm_thresh_hit
= sp
->rx_discards
;
8815 TG3_STAT_ADD32(&sp
->rx_errors
, RCVLPC_IN_ERRORS_CNT
);
8818 static void tg3_timer(unsigned long __opaque
)
8820 struct tg3
*tp
= (struct tg3
*) __opaque
;
8825 spin_lock(&tp
->lock
);
8827 if (!tg3_flag(tp
, TAGGED_STATUS
)) {
8828 /* All of this garbage is because when using non-tagged
8829 * IRQ status the mailbox/status_block protocol the chip
8830 * uses with the cpu is race prone.
8832 if (tp
->napi
[0].hw_status
->status
& SD_STATUS_UPDATED
) {
8833 tw32(GRC_LOCAL_CTRL
,
8834 tp
->grc_local_ctrl
| GRC_LCLCTRL_SETINT
);
8836 tw32(HOSTCC_MODE
, tp
->coalesce_mode
|
8837 HOSTCC_MODE_ENABLE
| HOSTCC_MODE_NOW
);
8840 if (!(tr32(WDMAC_MODE
) & WDMAC_MODE_ENABLE
)) {
8841 tg3_flag_set(tp
, RESTART_TIMER
);
8842 spin_unlock(&tp
->lock
);
8843 schedule_work(&tp
->reset_task
);
8848 /* This part only runs once per second. */
8849 if (!--tp
->timer_counter
) {
8850 if (tg3_flag(tp
, 5705_PLUS
))
8851 tg3_periodic_fetch_stats(tp
);
8853 if (tp
->setlpicnt
&& !--tp
->setlpicnt
)
8854 tg3_phy_eee_enable(tp
);
8856 if (tg3_flag(tp
, USE_LINKCHG_REG
)) {
8860 mac_stat
= tr32(MAC_STATUS
);
8863 if (tp
->phy_flags
& TG3_PHYFLG_USE_MI_INTERRUPT
) {
8864 if (mac_stat
& MAC_STATUS_MI_INTERRUPT
)
8866 } else if (mac_stat
& MAC_STATUS_LNKSTATE_CHANGED
)
8870 tg3_setup_phy(tp
, 0);
8871 } else if (tg3_flag(tp
, POLL_SERDES
)) {
8872 u32 mac_stat
= tr32(MAC_STATUS
);
8875 if (netif_carrier_ok(tp
->dev
) &&
8876 (mac_stat
& MAC_STATUS_LNKSTATE_CHANGED
)) {
8879 if (!netif_carrier_ok(tp
->dev
) &&
8880 (mac_stat
& (MAC_STATUS_PCS_SYNCED
|
8881 MAC_STATUS_SIGNAL_DET
))) {
8885 if (!tp
->serdes_counter
) {
8888 ~MAC_MODE_PORT_MODE_MASK
));
8890 tw32_f(MAC_MODE
, tp
->mac_mode
);
8893 tg3_setup_phy(tp
, 0);
8895 } else if ((tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) &&
8896 tg3_flag(tp
, 5780_CLASS
)) {
8897 tg3_serdes_parallel_detect(tp
);
8900 tp
->timer_counter
= tp
->timer_multiplier
;
8903 /* Heartbeat is only sent once every 2 seconds.
8905 * The heartbeat is to tell the ASF firmware that the host
8906 * driver is still alive. In the event that the OS crashes,
8907 * ASF needs to reset the hardware to free up the FIFO space
8908 * that may be filled with rx packets destined for the host.
8909 * If the FIFO is full, ASF will no longer function properly.
8911 * Unintended resets have been reported on real time kernels
8912 * where the timer doesn't run on time. Netpoll will also have
8915 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8916 * to check the ring condition when the heartbeat is expiring
8917 * before doing the reset. This will prevent most unintended
8920 if (!--tp
->asf_counter
) {
8921 if (tg3_flag(tp
, ENABLE_ASF
) && !tg3_flag(tp
, ENABLE_APE
)) {
8922 tg3_wait_for_event_ack(tp
);
8924 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
,
8925 FWCMD_NICDRV_ALIVE3
);
8926 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_LEN_MBOX
, 4);
8927 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
,
8928 TG3_FW_UPDATE_TIMEOUT_SEC
);
8930 tg3_generate_fw_event(tp
);
8932 tp
->asf_counter
= tp
->asf_multiplier
;
8935 spin_unlock(&tp
->lock
);
8938 tp
->timer
.expires
= jiffies
+ tp
->timer_offset
;
8939 add_timer(&tp
->timer
);
8942 static int tg3_request_irq(struct tg3
*tp
, int irq_num
)
8945 unsigned long flags
;
8947 struct tg3_napi
*tnapi
= &tp
->napi
[irq_num
];
8949 if (tp
->irq_cnt
== 1)
8950 name
= tp
->dev
->name
;
8952 name
= &tnapi
->irq_lbl
[0];
8953 snprintf(name
, IFNAMSIZ
, "%s-%d", tp
->dev
->name
, irq_num
);
8954 name
[IFNAMSIZ
-1] = 0;
8957 if (tg3_flag(tp
, USING_MSI
) || tg3_flag(tp
, USING_MSIX
)) {
8959 if (tg3_flag(tp
, 1SHOT_MSI
))
8964 if (tg3_flag(tp
, TAGGED_STATUS
))
8965 fn
= tg3_interrupt_tagged
;
8966 flags
= IRQF_SHARED
;
8969 return request_irq(tnapi
->irq_vec
, fn
, flags
, name
, tnapi
);
8972 static int tg3_test_interrupt(struct tg3
*tp
)
8974 struct tg3_napi
*tnapi
= &tp
->napi
[0];
8975 struct net_device
*dev
= tp
->dev
;
8976 int err
, i
, intr_ok
= 0;
8979 if (!netif_running(dev
))
8982 tg3_disable_ints(tp
);
8984 free_irq(tnapi
->irq_vec
, tnapi
);
8987 * Turn off MSI one shot mode. Otherwise this test has no
8988 * observable way to know whether the interrupt was delivered.
8990 if (tg3_flag(tp
, 57765_PLUS
) && tg3_flag(tp
, USING_MSI
)) {
8991 val
= tr32(MSGINT_MODE
) | MSGINT_MODE_ONE_SHOT_DISABLE
;
8992 tw32(MSGINT_MODE
, val
);
8995 err
= request_irq(tnapi
->irq_vec
, tg3_test_isr
,
8996 IRQF_SHARED
| IRQF_SAMPLE_RANDOM
, dev
->name
, tnapi
);
9000 tnapi
->hw_status
->status
&= ~SD_STATUS_UPDATED
;
9001 tg3_enable_ints(tp
);
9003 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
9006 for (i
= 0; i
< 5; i
++) {
9007 u32 int_mbox
, misc_host_ctrl
;
9009 int_mbox
= tr32_mailbox(tnapi
->int_mbox
);
9010 misc_host_ctrl
= tr32(TG3PCI_MISC_HOST_CTRL
);
9012 if ((int_mbox
!= 0) ||
9013 (misc_host_ctrl
& MISC_HOST_CTRL_MASK_PCI_INT
)) {
9021 tg3_disable_ints(tp
);
9023 free_irq(tnapi
->irq_vec
, tnapi
);
9025 err
= tg3_request_irq(tp
, 0);
9031 /* Reenable MSI one shot mode. */
9032 if (tg3_flag(tp
, 57765_PLUS
) && tg3_flag(tp
, USING_MSI
)) {
9033 val
= tr32(MSGINT_MODE
) & ~MSGINT_MODE_ONE_SHOT_DISABLE
;
9034 tw32(MSGINT_MODE
, val
);
9042 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
9043 * successfully restored
9045 static int tg3_test_msi(struct tg3
*tp
)
9050 if (!tg3_flag(tp
, USING_MSI
))
9053 /* Turn off SERR reporting in case MSI terminates with Master
9056 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
9057 pci_write_config_word(tp
->pdev
, PCI_COMMAND
,
9058 pci_cmd
& ~PCI_COMMAND_SERR
);
9060 err
= tg3_test_interrupt(tp
);
9062 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
9067 /* other failures */
9071 /* MSI test failed, go back to INTx mode */
9072 netdev_warn(tp
->dev
, "No interrupt was generated using MSI. Switching "
9073 "to INTx mode. Please report this failure to the PCI "
9074 "maintainer and include system chipset information\n");
9076 free_irq(tp
->napi
[0].irq_vec
, &tp
->napi
[0]);
9078 pci_disable_msi(tp
->pdev
);
9080 tg3_flag_clear(tp
, USING_MSI
);
9081 tp
->napi
[0].irq_vec
= tp
->pdev
->irq
;
9083 err
= tg3_request_irq(tp
, 0);
9087 /* Need to reset the chip because the MSI cycle may have terminated
9088 * with Master Abort.
9090 tg3_full_lock(tp
, 1);
9092 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
9093 err
= tg3_init_hw(tp
, 1);
9095 tg3_full_unlock(tp
);
9098 free_irq(tp
->napi
[0].irq_vec
, &tp
->napi
[0]);
9103 static int tg3_request_firmware(struct tg3
*tp
)
9105 const __be32
*fw_data
;
9107 if (request_firmware(&tp
->fw
, tp
->fw_needed
, &tp
->pdev
->dev
)) {
9108 netdev_err(tp
->dev
, "Failed to load firmware \"%s\"\n",
9113 fw_data
= (void *)tp
->fw
->data
;
9115 /* Firmware blob starts with version numbers, followed by
9116 * start address and _full_ length including BSS sections
9117 * (which must be longer than the actual data, of course
9120 tp
->fw_len
= be32_to_cpu(fw_data
[2]); /* includes bss */
9121 if (tp
->fw_len
< (tp
->fw
->size
- 12)) {
9122 netdev_err(tp
->dev
, "bogus length %d in \"%s\"\n",
9123 tp
->fw_len
, tp
->fw_needed
);
9124 release_firmware(tp
->fw
);
9129 /* We no longer need firmware; we have it. */
9130 tp
->fw_needed
= NULL
;
9134 static bool tg3_enable_msix(struct tg3
*tp
)
9136 int i
, rc
, cpus
= num_online_cpus();
9137 struct msix_entry msix_ent
[tp
->irq_max
];
9140 /* Just fallback to the simpler MSI mode. */
9144 * We want as many rx rings enabled as there are cpus.
9145 * The first MSIX vector only deals with link interrupts, etc,
9146 * so we add one to the number of vectors we are requesting.
9148 tp
->irq_cnt
= min_t(unsigned, cpus
+ 1, tp
->irq_max
);
9150 for (i
= 0; i
< tp
->irq_max
; i
++) {
9151 msix_ent
[i
].entry
= i
;
9152 msix_ent
[i
].vector
= 0;
9155 rc
= pci_enable_msix(tp
->pdev
, msix_ent
, tp
->irq_cnt
);
9158 } else if (rc
!= 0) {
9159 if (pci_enable_msix(tp
->pdev
, msix_ent
, rc
))
9161 netdev_notice(tp
->dev
, "Requested %d MSI-X vectors, received %d\n",
9166 for (i
= 0; i
< tp
->irq_max
; i
++)
9167 tp
->napi
[i
].irq_vec
= msix_ent
[i
].vector
;
9169 netif_set_real_num_tx_queues(tp
->dev
, 1);
9170 rc
= tp
->irq_cnt
> 1 ? tp
->irq_cnt
- 1 : 1;
9171 if (netif_set_real_num_rx_queues(tp
->dev
, rc
)) {
9172 pci_disable_msix(tp
->pdev
);
9176 if (tp
->irq_cnt
> 1) {
9177 tg3_flag_set(tp
, ENABLE_RSS
);
9179 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
||
9180 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
) {
9181 tg3_flag_set(tp
, ENABLE_TSS
);
9182 netif_set_real_num_tx_queues(tp
->dev
, tp
->irq_cnt
- 1);
9189 static void tg3_ints_init(struct tg3
*tp
)
9191 if ((tg3_flag(tp
, SUPPORT_MSI
) || tg3_flag(tp
, SUPPORT_MSIX
)) &&
9192 !tg3_flag(tp
, TAGGED_STATUS
)) {
9193 /* All MSI supporting chips should support tagged
9194 * status. Assert that this is the case.
9196 netdev_warn(tp
->dev
,
9197 "MSI without TAGGED_STATUS? Not using MSI\n");
9201 if (tg3_flag(tp
, SUPPORT_MSIX
) && tg3_enable_msix(tp
))
9202 tg3_flag_set(tp
, USING_MSIX
);
9203 else if (tg3_flag(tp
, SUPPORT_MSI
) && pci_enable_msi(tp
->pdev
) == 0)
9204 tg3_flag_set(tp
, USING_MSI
);
9206 if (tg3_flag(tp
, USING_MSI
) || tg3_flag(tp
, USING_MSIX
)) {
9207 u32 msi_mode
= tr32(MSGINT_MODE
);
9208 if (tg3_flag(tp
, USING_MSIX
) && tp
->irq_cnt
> 1)
9209 msi_mode
|= MSGINT_MODE_MULTIVEC_EN
;
9210 tw32(MSGINT_MODE
, msi_mode
| MSGINT_MODE_ENABLE
);
9213 if (!tg3_flag(tp
, USING_MSIX
)) {
9215 tp
->napi
[0].irq_vec
= tp
->pdev
->irq
;
9216 netif_set_real_num_tx_queues(tp
->dev
, 1);
9217 netif_set_real_num_rx_queues(tp
->dev
, 1);
9221 static void tg3_ints_fini(struct tg3
*tp
)
9223 if (tg3_flag(tp
, USING_MSIX
))
9224 pci_disable_msix(tp
->pdev
);
9225 else if (tg3_flag(tp
, USING_MSI
))
9226 pci_disable_msi(tp
->pdev
);
9227 tg3_flag_clear(tp
, USING_MSI
);
9228 tg3_flag_clear(tp
, USING_MSIX
);
9229 tg3_flag_clear(tp
, ENABLE_RSS
);
9230 tg3_flag_clear(tp
, ENABLE_TSS
);
9233 static int tg3_open(struct net_device
*dev
)
9235 struct tg3
*tp
= netdev_priv(dev
);
9238 if (tp
->fw_needed
) {
9239 err
= tg3_request_firmware(tp
);
9240 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
) {
9244 netdev_warn(tp
->dev
, "TSO capability disabled\n");
9245 tg3_flag_clear(tp
, TSO_CAPABLE
);
9246 } else if (!tg3_flag(tp
, TSO_CAPABLE
)) {
9247 netdev_notice(tp
->dev
, "TSO capability restored\n");
9248 tg3_flag_set(tp
, TSO_CAPABLE
);
9252 netif_carrier_off(tp
->dev
);
9254 err
= tg3_power_up(tp
);
9258 tg3_full_lock(tp
, 0);
9260 tg3_disable_ints(tp
);
9261 tg3_flag_clear(tp
, INIT_COMPLETE
);
9263 tg3_full_unlock(tp
);
9266 * Setup interrupts first so we know how
9267 * many NAPI resources to allocate
9271 /* The placement of this call is tied
9272 * to the setup and use of Host TX descriptors.
9274 err
= tg3_alloc_consistent(tp
);
9280 tg3_napi_enable(tp
);
9282 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
9283 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
9284 err
= tg3_request_irq(tp
, i
);
9286 for (i
--; i
>= 0; i
--)
9287 free_irq(tnapi
->irq_vec
, tnapi
);
9295 tg3_full_lock(tp
, 0);
9297 err
= tg3_init_hw(tp
, 1);
9299 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
9302 if (tg3_flag(tp
, TAGGED_STATUS
))
9303 tp
->timer_offset
= HZ
;
9305 tp
->timer_offset
= HZ
/ 10;
9307 BUG_ON(tp
->timer_offset
> HZ
);
9308 tp
->timer_counter
= tp
->timer_multiplier
=
9309 (HZ
/ tp
->timer_offset
);
9310 tp
->asf_counter
= tp
->asf_multiplier
=
9311 ((HZ
/ tp
->timer_offset
) * 2);
9313 init_timer(&tp
->timer
);
9314 tp
->timer
.expires
= jiffies
+ tp
->timer_offset
;
9315 tp
->timer
.data
= (unsigned long) tp
;
9316 tp
->timer
.function
= tg3_timer
;
9319 tg3_full_unlock(tp
);
9324 if (tg3_flag(tp
, USING_MSI
)) {
9325 err
= tg3_test_msi(tp
);
9328 tg3_full_lock(tp
, 0);
9329 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
9331 tg3_full_unlock(tp
);
9336 if (!tg3_flag(tp
, 57765_PLUS
) && tg3_flag(tp
, USING_MSI
)) {
9337 u32 val
= tr32(PCIE_TRANSACTION_CFG
);
9339 tw32(PCIE_TRANSACTION_CFG
,
9340 val
| PCIE_TRANS_CFG_1SHOT_MSI
);
9346 tg3_full_lock(tp
, 0);
9348 add_timer(&tp
->timer
);
9349 tg3_flag_set(tp
, INIT_COMPLETE
);
9350 tg3_enable_ints(tp
);
9352 tg3_full_unlock(tp
);
9354 netif_tx_start_all_queues(dev
);
9357 * Reset loopback feature if it was turned on while the device was down
9358 * make sure that it's installed properly now.
9360 if (dev
->features
& NETIF_F_LOOPBACK
)
9361 tg3_set_loopback(dev
, dev
->features
);
9366 for (i
= tp
->irq_cnt
- 1; i
>= 0; i
--) {
9367 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
9368 free_irq(tnapi
->irq_vec
, tnapi
);
9372 tg3_napi_disable(tp
);
9374 tg3_free_consistent(tp
);
9381 static struct rtnl_link_stats64
*tg3_get_stats64(struct net_device
*,
9382 struct rtnl_link_stats64
*);
9383 static struct tg3_ethtool_stats
*tg3_get_estats(struct tg3
*);
9385 static int tg3_close(struct net_device
*dev
)
9388 struct tg3
*tp
= netdev_priv(dev
);
9390 tg3_napi_disable(tp
);
9391 cancel_work_sync(&tp
->reset_task
);
9393 netif_tx_stop_all_queues(dev
);
9395 del_timer_sync(&tp
->timer
);
9399 tg3_full_lock(tp
, 1);
9401 tg3_disable_ints(tp
);
9403 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
9405 tg3_flag_clear(tp
, INIT_COMPLETE
);
9407 tg3_full_unlock(tp
);
9409 for (i
= tp
->irq_cnt
- 1; i
>= 0; i
--) {
9410 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
9411 free_irq(tnapi
->irq_vec
, tnapi
);
9416 tg3_get_stats64(tp
->dev
, &tp
->net_stats_prev
);
9418 memcpy(&tp
->estats_prev
, tg3_get_estats(tp
),
9419 sizeof(tp
->estats_prev
));
9423 tg3_free_consistent(tp
);
9427 netif_carrier_off(tp
->dev
);
9432 static inline u64
get_stat64(tg3_stat64_t
*val
)
9434 return ((u64
)val
->high
<< 32) | ((u64
)val
->low
);
9437 static u64
calc_crc_errors(struct tg3
*tp
)
9439 struct tg3_hw_stats
*hw_stats
= tp
->hw_stats
;
9441 if (!(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
9442 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
9443 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
)) {
9446 spin_lock_bh(&tp
->lock
);
9447 if (!tg3_readphy(tp
, MII_TG3_TEST1
, &val
)) {
9448 tg3_writephy(tp
, MII_TG3_TEST1
,
9449 val
| MII_TG3_TEST1_CRC_EN
);
9450 tg3_readphy(tp
, MII_TG3_RXR_COUNTERS
, &val
);
9453 spin_unlock_bh(&tp
->lock
);
9455 tp
->phy_crc_errors
+= val
;
9457 return tp
->phy_crc_errors
;
9460 return get_stat64(&hw_stats
->rx_fcs_errors
);
/* Accumulate one hardware stat into the ethtool snapshot:
 * estats->member = snapshot-at-last-close + current hardware counter.
 * Relies on estats/old_estats/hw_stats locals at the expansion site.
 */
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
	get_stat64(&hw_stats->member)
9467 static struct tg3_ethtool_stats
*tg3_get_estats(struct tg3
*tp
)
9469 struct tg3_ethtool_stats
*estats
= &tp
->estats
;
9470 struct tg3_ethtool_stats
*old_estats
= &tp
->estats_prev
;
9471 struct tg3_hw_stats
*hw_stats
= tp
->hw_stats
;
9476 ESTAT_ADD(rx_octets
);
9477 ESTAT_ADD(rx_fragments
);
9478 ESTAT_ADD(rx_ucast_packets
);
9479 ESTAT_ADD(rx_mcast_packets
);
9480 ESTAT_ADD(rx_bcast_packets
);
9481 ESTAT_ADD(rx_fcs_errors
);
9482 ESTAT_ADD(rx_align_errors
);
9483 ESTAT_ADD(rx_xon_pause_rcvd
);
9484 ESTAT_ADD(rx_xoff_pause_rcvd
);
9485 ESTAT_ADD(rx_mac_ctrl_rcvd
);
9486 ESTAT_ADD(rx_xoff_entered
);
9487 ESTAT_ADD(rx_frame_too_long_errors
);
9488 ESTAT_ADD(rx_jabbers
);
9489 ESTAT_ADD(rx_undersize_packets
);
9490 ESTAT_ADD(rx_in_length_errors
);
9491 ESTAT_ADD(rx_out_length_errors
);
9492 ESTAT_ADD(rx_64_or_less_octet_packets
);
9493 ESTAT_ADD(rx_65_to_127_octet_packets
);
9494 ESTAT_ADD(rx_128_to_255_octet_packets
);
9495 ESTAT_ADD(rx_256_to_511_octet_packets
);
9496 ESTAT_ADD(rx_512_to_1023_octet_packets
);
9497 ESTAT_ADD(rx_1024_to_1522_octet_packets
);
9498 ESTAT_ADD(rx_1523_to_2047_octet_packets
);
9499 ESTAT_ADD(rx_2048_to_4095_octet_packets
);
9500 ESTAT_ADD(rx_4096_to_8191_octet_packets
);
9501 ESTAT_ADD(rx_8192_to_9022_octet_packets
);
9503 ESTAT_ADD(tx_octets
);
9504 ESTAT_ADD(tx_collisions
);
9505 ESTAT_ADD(tx_xon_sent
);
9506 ESTAT_ADD(tx_xoff_sent
);
9507 ESTAT_ADD(tx_flow_control
);
9508 ESTAT_ADD(tx_mac_errors
);
9509 ESTAT_ADD(tx_single_collisions
);
9510 ESTAT_ADD(tx_mult_collisions
);
9511 ESTAT_ADD(tx_deferred
);
9512 ESTAT_ADD(tx_excessive_collisions
);
9513 ESTAT_ADD(tx_late_collisions
);
9514 ESTAT_ADD(tx_collide_2times
);
9515 ESTAT_ADD(tx_collide_3times
);
9516 ESTAT_ADD(tx_collide_4times
);
9517 ESTAT_ADD(tx_collide_5times
);
9518 ESTAT_ADD(tx_collide_6times
);
9519 ESTAT_ADD(tx_collide_7times
);
9520 ESTAT_ADD(tx_collide_8times
);
9521 ESTAT_ADD(tx_collide_9times
);
9522 ESTAT_ADD(tx_collide_10times
);
9523 ESTAT_ADD(tx_collide_11times
);
9524 ESTAT_ADD(tx_collide_12times
);
9525 ESTAT_ADD(tx_collide_13times
);
9526 ESTAT_ADD(tx_collide_14times
);
9527 ESTAT_ADD(tx_collide_15times
);
9528 ESTAT_ADD(tx_ucast_packets
);
9529 ESTAT_ADD(tx_mcast_packets
);
9530 ESTAT_ADD(tx_bcast_packets
);
9531 ESTAT_ADD(tx_carrier_sense_errors
);
9532 ESTAT_ADD(tx_discards
);
9533 ESTAT_ADD(tx_errors
);
9535 ESTAT_ADD(dma_writeq_full
);
9536 ESTAT_ADD(dma_write_prioq_full
);
9537 ESTAT_ADD(rxbds_empty
);
9538 ESTAT_ADD(rx_discards
);
9539 ESTAT_ADD(rx_errors
);
9540 ESTAT_ADD(rx_threshold_hit
);
9542 ESTAT_ADD(dma_readq_full
);
9543 ESTAT_ADD(dma_read_prioq_full
);
9544 ESTAT_ADD(tx_comp_queue_full
);
9546 ESTAT_ADD(ring_set_send_prod_index
);
9547 ESTAT_ADD(ring_status_update
);
9548 ESTAT_ADD(nic_irqs
);
9549 ESTAT_ADD(nic_avoided_irqs
);
9550 ESTAT_ADD(nic_tx_threshold_hit
);
9552 ESTAT_ADD(mbuf_lwm_thresh_hit
);
9557 static struct rtnl_link_stats64
*tg3_get_stats64(struct net_device
*dev
,
9558 struct rtnl_link_stats64
*stats
)
9560 struct tg3
*tp
= netdev_priv(dev
);
9561 struct rtnl_link_stats64
*old_stats
= &tp
->net_stats_prev
;
9562 struct tg3_hw_stats
*hw_stats
= tp
->hw_stats
;
9567 stats
->rx_packets
= old_stats
->rx_packets
+
9568 get_stat64(&hw_stats
->rx_ucast_packets
) +
9569 get_stat64(&hw_stats
->rx_mcast_packets
) +
9570 get_stat64(&hw_stats
->rx_bcast_packets
);
9572 stats
->tx_packets
= old_stats
->tx_packets
+
9573 get_stat64(&hw_stats
->tx_ucast_packets
) +
9574 get_stat64(&hw_stats
->tx_mcast_packets
) +
9575 get_stat64(&hw_stats
->tx_bcast_packets
);
9577 stats
->rx_bytes
= old_stats
->rx_bytes
+
9578 get_stat64(&hw_stats
->rx_octets
);
9579 stats
->tx_bytes
= old_stats
->tx_bytes
+
9580 get_stat64(&hw_stats
->tx_octets
);
9582 stats
->rx_errors
= old_stats
->rx_errors
+
9583 get_stat64(&hw_stats
->rx_errors
);
9584 stats
->tx_errors
= old_stats
->tx_errors
+
9585 get_stat64(&hw_stats
->tx_errors
) +
9586 get_stat64(&hw_stats
->tx_mac_errors
) +
9587 get_stat64(&hw_stats
->tx_carrier_sense_errors
) +
9588 get_stat64(&hw_stats
->tx_discards
);
9590 stats
->multicast
= old_stats
->multicast
+
9591 get_stat64(&hw_stats
->rx_mcast_packets
);
9592 stats
->collisions
= old_stats
->collisions
+
9593 get_stat64(&hw_stats
->tx_collisions
);
9595 stats
->rx_length_errors
= old_stats
->rx_length_errors
+
9596 get_stat64(&hw_stats
->rx_frame_too_long_errors
) +
9597 get_stat64(&hw_stats
->rx_undersize_packets
);
9599 stats
->rx_over_errors
= old_stats
->rx_over_errors
+
9600 get_stat64(&hw_stats
->rxbds_empty
);
9601 stats
->rx_frame_errors
= old_stats
->rx_frame_errors
+
9602 get_stat64(&hw_stats
->rx_align_errors
);
9603 stats
->tx_aborted_errors
= old_stats
->tx_aborted_errors
+
9604 get_stat64(&hw_stats
->tx_discards
);
9605 stats
->tx_carrier_errors
= old_stats
->tx_carrier_errors
+
9606 get_stat64(&hw_stats
->tx_carrier_sense_errors
);
9608 stats
->rx_crc_errors
= old_stats
->rx_crc_errors
+
9609 calc_crc_errors(tp
);
9611 stats
->rx_missed_errors
= old_stats
->rx_missed_errors
+
9612 get_stat64(&hw_stats
->rx_discards
);
9614 stats
->rx_dropped
= tp
->rx_dropped
;
9619 static inline u32
calc_crc(unsigned char *buf
, int len
)
9627 for (j
= 0; j
< len
; j
++) {
9630 for (k
= 0; k
< 8; k
++) {
9643 static void tg3_set_multi(struct tg3
*tp
, unsigned int accept_all
)
9645 /* accept or reject all multicast frames */
9646 tw32(MAC_HASH_REG_0
, accept_all
? 0xffffffff : 0);
9647 tw32(MAC_HASH_REG_1
, accept_all
? 0xffffffff : 0);
9648 tw32(MAC_HASH_REG_2
, accept_all
? 0xffffffff : 0);
9649 tw32(MAC_HASH_REG_3
, accept_all
? 0xffffffff : 0);
9652 static void __tg3_set_rx_mode(struct net_device
*dev
)
9654 struct tg3
*tp
= netdev_priv(dev
);
9657 rx_mode
= tp
->rx_mode
& ~(RX_MODE_PROMISC
|
9658 RX_MODE_KEEP_VLAN_TAG
);
9660 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9661 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9664 if (!tg3_flag(tp
, ENABLE_ASF
))
9665 rx_mode
|= RX_MODE_KEEP_VLAN_TAG
;
9668 if (dev
->flags
& IFF_PROMISC
) {
9669 /* Promiscuous mode. */
9670 rx_mode
|= RX_MODE_PROMISC
;
9671 } else if (dev
->flags
& IFF_ALLMULTI
) {
9672 /* Accept all multicast. */
9673 tg3_set_multi(tp
, 1);
9674 } else if (netdev_mc_empty(dev
)) {
9675 /* Reject all multicast. */
9676 tg3_set_multi(tp
, 0);
9678 /* Accept one or more multicast(s). */
9679 struct netdev_hw_addr
*ha
;
9680 u32 mc_filter
[4] = { 0, };
9685 netdev_for_each_mc_addr(ha
, dev
) {
9686 crc
= calc_crc(ha
->addr
, ETH_ALEN
);
9688 regidx
= (bit
& 0x60) >> 5;
9690 mc_filter
[regidx
] |= (1 << bit
);
9693 tw32(MAC_HASH_REG_0
, mc_filter
[0]);
9694 tw32(MAC_HASH_REG_1
, mc_filter
[1]);
9695 tw32(MAC_HASH_REG_2
, mc_filter
[2]);
9696 tw32(MAC_HASH_REG_3
, mc_filter
[3]);
9699 if (rx_mode
!= tp
->rx_mode
) {
9700 tp
->rx_mode
= rx_mode
;
9701 tw32_f(MAC_RX_MODE
, rx_mode
);
/* ndo_set_rx_mode: take the device lock and apply the RX filter. */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
9718 static int tg3_get_regs_len(struct net_device
*dev
)
9720 return TG3_REG_BLK_SIZE
;
9723 static void tg3_get_regs(struct net_device
*dev
,
9724 struct ethtool_regs
*regs
, void *_p
)
9726 struct tg3
*tp
= netdev_priv(dev
);
9730 memset(_p
, 0, TG3_REG_BLK_SIZE
);
9732 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
9735 tg3_full_lock(tp
, 0);
9737 tg3_dump_legacy_regs(tp
, (u32
*)_p
);
9739 tg3_full_unlock(tp
);
9742 static int tg3_get_eeprom_len(struct net_device
*dev
)
9744 struct tg3
*tp
= netdev_priv(dev
);
9746 return tp
->nvram_size
;
9749 static int tg3_get_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
, u8
*data
)
9751 struct tg3
*tp
= netdev_priv(dev
);
9754 u32 i
, offset
, len
, b_offset
, b_count
;
9757 if (tg3_flag(tp
, NO_NVRAM
))
9760 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
9763 offset
= eeprom
->offset
;
9767 eeprom
->magic
= TG3_EEPROM_MAGIC
;
9770 /* adjustments to start on required 4 byte boundary */
9771 b_offset
= offset
& 3;
9772 b_count
= 4 - b_offset
;
9773 if (b_count
> len
) {
9774 /* i.e. offset=1 len=2 */
9777 ret
= tg3_nvram_read_be32(tp
, offset
-b_offset
, &val
);
9780 memcpy(data
, ((char *)&val
) + b_offset
, b_count
);
9783 eeprom
->len
+= b_count
;
9786 /* read bytes up to the last 4 byte boundary */
9787 pd
= &data
[eeprom
->len
];
9788 for (i
= 0; i
< (len
- (len
& 3)); i
+= 4) {
9789 ret
= tg3_nvram_read_be32(tp
, offset
+ i
, &val
);
9794 memcpy(pd
+ i
, &val
, 4);
9799 /* read last bytes not ending on 4 byte boundary */
9800 pd
= &data
[eeprom
->len
];
9802 b_offset
= offset
+ len
- b_count
;
9803 ret
= tg3_nvram_read_be32(tp
, b_offset
, &val
);
9806 memcpy(pd
, &val
, b_count
);
9807 eeprom
->len
+= b_count
;
9812 static int tg3_nvram_write_block(struct tg3
*tp
, u32 offset
, u32 len
, u8
*buf
);
9814 static int tg3_set_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
, u8
*data
)
9816 struct tg3
*tp
= netdev_priv(dev
);
9818 u32 offset
, len
, b_offset
, odd_len
;
9822 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
9825 if (tg3_flag(tp
, NO_NVRAM
) ||
9826 eeprom
->magic
!= TG3_EEPROM_MAGIC
)
9829 offset
= eeprom
->offset
;
9832 if ((b_offset
= (offset
& 3))) {
9833 /* adjustments to start on required 4 byte boundary */
9834 ret
= tg3_nvram_read_be32(tp
, offset
-b_offset
, &start
);
9845 /* adjustments to end on required 4 byte boundary */
9847 len
= (len
+ 3) & ~3;
9848 ret
= tg3_nvram_read_be32(tp
, offset
+len
-4, &end
);
9854 if (b_offset
|| odd_len
) {
9855 buf
= kmalloc(len
, GFP_KERNEL
);
9859 memcpy(buf
, &start
, 4);
9861 memcpy(buf
+len
-4, &end
, 4);
9862 memcpy(buf
+ b_offset
, data
, eeprom
->len
);
9865 ret
= tg3_nvram_write_block(tp
, offset
, len
, buf
);
9873 static int tg3_get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
9875 struct tg3
*tp
= netdev_priv(dev
);
9877 if (tg3_flag(tp
, USE_PHYLIB
)) {
9878 struct phy_device
*phydev
;
9879 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
9881 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
9882 return phy_ethtool_gset(phydev
, cmd
);
9885 cmd
->supported
= (SUPPORTED_Autoneg
);
9887 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
))
9888 cmd
->supported
|= (SUPPORTED_1000baseT_Half
|
9889 SUPPORTED_1000baseT_Full
);
9891 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)) {
9892 cmd
->supported
|= (SUPPORTED_100baseT_Half
|
9893 SUPPORTED_100baseT_Full
|
9894 SUPPORTED_10baseT_Half
|
9895 SUPPORTED_10baseT_Full
|
9897 cmd
->port
= PORT_TP
;
9899 cmd
->supported
|= SUPPORTED_FIBRE
;
9900 cmd
->port
= PORT_FIBRE
;
9903 cmd
->advertising
= tp
->link_config
.advertising
;
9904 if (netif_running(dev
)) {
9905 ethtool_cmd_speed_set(cmd
, tp
->link_config
.active_speed
);
9906 cmd
->duplex
= tp
->link_config
.active_duplex
;
9908 ethtool_cmd_speed_set(cmd
, SPEED_INVALID
);
9909 cmd
->duplex
= DUPLEX_INVALID
;
9911 cmd
->phy_address
= tp
->phy_addr
;
9912 cmd
->transceiver
= XCVR_INTERNAL
;
9913 cmd
->autoneg
= tp
->link_config
.autoneg
;
9919 static int tg3_set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
9921 struct tg3
*tp
= netdev_priv(dev
);
9922 u32 speed
= ethtool_cmd_speed(cmd
);
9924 if (tg3_flag(tp
, USE_PHYLIB
)) {
9925 struct phy_device
*phydev
;
9926 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
9928 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
9929 return phy_ethtool_sset(phydev
, cmd
);
9932 if (cmd
->autoneg
!= AUTONEG_ENABLE
&&
9933 cmd
->autoneg
!= AUTONEG_DISABLE
)
9936 if (cmd
->autoneg
== AUTONEG_DISABLE
&&
9937 cmd
->duplex
!= DUPLEX_FULL
&&
9938 cmd
->duplex
!= DUPLEX_HALF
)
9941 if (cmd
->autoneg
== AUTONEG_ENABLE
) {
9942 u32 mask
= ADVERTISED_Autoneg
|
9944 ADVERTISED_Asym_Pause
;
9946 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
))
9947 mask
|= ADVERTISED_1000baseT_Half
|
9948 ADVERTISED_1000baseT_Full
;
9950 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
))
9951 mask
|= ADVERTISED_100baseT_Half
|
9952 ADVERTISED_100baseT_Full
|
9953 ADVERTISED_10baseT_Half
|
9954 ADVERTISED_10baseT_Full
|
9957 mask
|= ADVERTISED_FIBRE
;
9959 if (cmd
->advertising
& ~mask
)
9962 mask
&= (ADVERTISED_1000baseT_Half
|
9963 ADVERTISED_1000baseT_Full
|
9964 ADVERTISED_100baseT_Half
|
9965 ADVERTISED_100baseT_Full
|
9966 ADVERTISED_10baseT_Half
|
9967 ADVERTISED_10baseT_Full
);
9969 cmd
->advertising
&= mask
;
9971 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
) {
9972 if (speed
!= SPEED_1000
)
9975 if (cmd
->duplex
!= DUPLEX_FULL
)
9978 if (speed
!= SPEED_100
&&
9984 tg3_full_lock(tp
, 0);
9986 tp
->link_config
.autoneg
= cmd
->autoneg
;
9987 if (cmd
->autoneg
== AUTONEG_ENABLE
) {
9988 tp
->link_config
.advertising
= (cmd
->advertising
|
9989 ADVERTISED_Autoneg
);
9990 tp
->link_config
.speed
= SPEED_INVALID
;
9991 tp
->link_config
.duplex
= DUPLEX_INVALID
;
9993 tp
->link_config
.advertising
= 0;
9994 tp
->link_config
.speed
= speed
;
9995 tp
->link_config
.duplex
= cmd
->duplex
;
9998 tp
->link_config
.orig_speed
= tp
->link_config
.speed
;
9999 tp
->link_config
.orig_duplex
= tp
->link_config
.duplex
;
10000 tp
->link_config
.orig_autoneg
= tp
->link_config
.autoneg
;
10002 if (netif_running(dev
))
10003 tg3_setup_phy(tp
, 1);
10005 tg3_full_unlock(tp
);
10010 static void tg3_get_drvinfo(struct net_device
*dev
, struct ethtool_drvinfo
*info
)
10012 struct tg3
*tp
= netdev_priv(dev
);
10014 strcpy(info
->driver
, DRV_MODULE_NAME
);
10015 strcpy(info
->version
, DRV_MODULE_VERSION
);
10016 strcpy(info
->fw_version
, tp
->fw_ver
);
10017 strcpy(info
->bus_info
, pci_name(tp
->pdev
));
10020 static void tg3_get_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
10022 struct tg3
*tp
= netdev_priv(dev
);
10024 if (tg3_flag(tp
, WOL_CAP
) && device_can_wakeup(&tp
->pdev
->dev
))
10025 wol
->supported
= WAKE_MAGIC
;
10027 wol
->supported
= 0;
10029 if (tg3_flag(tp
, WOL_ENABLE
) && device_can_wakeup(&tp
->pdev
->dev
))
10030 wol
->wolopts
= WAKE_MAGIC
;
10031 memset(&wol
->sopass
, 0, sizeof(wol
->sopass
));
10034 static int tg3_set_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
10036 struct tg3
*tp
= netdev_priv(dev
);
10037 struct device
*dp
= &tp
->pdev
->dev
;
10039 if (wol
->wolopts
& ~WAKE_MAGIC
)
10041 if ((wol
->wolopts
& WAKE_MAGIC
) &&
10042 !(tg3_flag(tp
, WOL_CAP
) && device_can_wakeup(dp
)))
10045 device_set_wakeup_enable(dp
, wol
->wolopts
& WAKE_MAGIC
);
10047 spin_lock_bh(&tp
->lock
);
10048 if (device_may_wakeup(dp
))
10049 tg3_flag_set(tp
, WOL_ENABLE
);
10051 tg3_flag_clear(tp
, WOL_ENABLE
);
10052 spin_unlock_bh(&tp
->lock
);
10057 static u32
tg3_get_msglevel(struct net_device
*dev
)
10059 struct tg3
*tp
= netdev_priv(dev
);
10060 return tp
->msg_enable
;
10063 static void tg3_set_msglevel(struct net_device
*dev
, u32 value
)
10065 struct tg3
*tp
= netdev_priv(dev
);
10066 tp
->msg_enable
= value
;
10069 static int tg3_nway_reset(struct net_device
*dev
)
10071 struct tg3
*tp
= netdev_priv(dev
);
10074 if (!netif_running(dev
))
10077 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
10080 if (tg3_flag(tp
, USE_PHYLIB
)) {
10081 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
10083 r
= phy_start_aneg(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
10087 spin_lock_bh(&tp
->lock
);
10089 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
10090 if (!tg3_readphy(tp
, MII_BMCR
, &bmcr
) &&
10091 ((bmcr
& BMCR_ANENABLE
) ||
10092 (tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
))) {
10093 tg3_writephy(tp
, MII_BMCR
, bmcr
| BMCR_ANRESTART
|
10097 spin_unlock_bh(&tp
->lock
);
10103 static void tg3_get_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
10105 struct tg3
*tp
= netdev_priv(dev
);
10107 ering
->rx_max_pending
= tp
->rx_std_ring_mask
;
10108 ering
->rx_mini_max_pending
= 0;
10109 if (tg3_flag(tp
, JUMBO_RING_ENABLE
))
10110 ering
->rx_jumbo_max_pending
= tp
->rx_jmb_ring_mask
;
10112 ering
->rx_jumbo_max_pending
= 0;
10114 ering
->tx_max_pending
= TG3_TX_RING_SIZE
- 1;
10116 ering
->rx_pending
= tp
->rx_pending
;
10117 ering
->rx_mini_pending
= 0;
10118 if (tg3_flag(tp
, JUMBO_RING_ENABLE
))
10119 ering
->rx_jumbo_pending
= tp
->rx_jumbo_pending
;
10121 ering
->rx_jumbo_pending
= 0;
10123 ering
->tx_pending
= tp
->napi
[0].tx_pending
;
10126 static int tg3_set_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
10128 struct tg3
*tp
= netdev_priv(dev
);
10129 int i
, irq_sync
= 0, err
= 0;
10131 if ((ering
->rx_pending
> tp
->rx_std_ring_mask
) ||
10132 (ering
->rx_jumbo_pending
> tp
->rx_jmb_ring_mask
) ||
10133 (ering
->tx_pending
> TG3_TX_RING_SIZE
- 1) ||
10134 (ering
->tx_pending
<= MAX_SKB_FRAGS
) ||
10135 (tg3_flag(tp
, TSO_BUG
) &&
10136 (ering
->tx_pending
<= (MAX_SKB_FRAGS
* 3))))
10139 if (netif_running(dev
)) {
10141 tg3_netif_stop(tp
);
10145 tg3_full_lock(tp
, irq_sync
);
10147 tp
->rx_pending
= ering
->rx_pending
;
10149 if (tg3_flag(tp
, MAX_RXPEND_64
) &&
10150 tp
->rx_pending
> 63)
10151 tp
->rx_pending
= 63;
10152 tp
->rx_jumbo_pending
= ering
->rx_jumbo_pending
;
10154 for (i
= 0; i
< tp
->irq_max
; i
++)
10155 tp
->napi
[i
].tx_pending
= ering
->tx_pending
;
10157 if (netif_running(dev
)) {
10158 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
10159 err
= tg3_restart_hw(tp
, 1);
10161 tg3_netif_start(tp
);
10164 tg3_full_unlock(tp
);
10166 if (irq_sync
&& !err
)
10172 static void tg3_get_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
10174 struct tg3
*tp
= netdev_priv(dev
);
10176 epause
->autoneg
= !!tg3_flag(tp
, PAUSE_AUTONEG
);
10178 if (tp
->link_config
.active_flowctrl
& FLOW_CTRL_RX
)
10179 epause
->rx_pause
= 1;
10181 epause
->rx_pause
= 0;
10183 if (tp
->link_config
.active_flowctrl
& FLOW_CTRL_TX
)
10184 epause
->tx_pause
= 1;
10186 epause
->tx_pause
= 0;
10189 static int tg3_set_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
10191 struct tg3
*tp
= netdev_priv(dev
);
10194 if (tg3_flag(tp
, USE_PHYLIB
)) {
10196 struct phy_device
*phydev
;
10198 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
10200 if (!(phydev
->supported
& SUPPORTED_Pause
) ||
10201 (!(phydev
->supported
& SUPPORTED_Asym_Pause
) &&
10202 (epause
->rx_pause
!= epause
->tx_pause
)))
10205 tp
->link_config
.flowctrl
= 0;
10206 if (epause
->rx_pause
) {
10207 tp
->link_config
.flowctrl
|= FLOW_CTRL_RX
;
10209 if (epause
->tx_pause
) {
10210 tp
->link_config
.flowctrl
|= FLOW_CTRL_TX
;
10211 newadv
= ADVERTISED_Pause
;
10213 newadv
= ADVERTISED_Pause
|
10214 ADVERTISED_Asym_Pause
;
10215 } else if (epause
->tx_pause
) {
10216 tp
->link_config
.flowctrl
|= FLOW_CTRL_TX
;
10217 newadv
= ADVERTISED_Asym_Pause
;
10221 if (epause
->autoneg
)
10222 tg3_flag_set(tp
, PAUSE_AUTONEG
);
10224 tg3_flag_clear(tp
, PAUSE_AUTONEG
);
10226 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) {
10227 u32 oldadv
= phydev
->advertising
&
10228 (ADVERTISED_Pause
| ADVERTISED_Asym_Pause
);
10229 if (oldadv
!= newadv
) {
10230 phydev
->advertising
&=
10231 ~(ADVERTISED_Pause
|
10232 ADVERTISED_Asym_Pause
);
10233 phydev
->advertising
|= newadv
;
10234 if (phydev
->autoneg
) {
10236 * Always renegotiate the link to
10237 * inform our link partner of our
10238 * flow control settings, even if the
10239 * flow control is forced. Let
10240 * tg3_adjust_link() do the final
10241 * flow control setup.
10243 return phy_start_aneg(phydev
);
10247 if (!epause
->autoneg
)
10248 tg3_setup_flow_control(tp
, 0, 0);
10250 tp
->link_config
.orig_advertising
&=
10251 ~(ADVERTISED_Pause
|
10252 ADVERTISED_Asym_Pause
);
10253 tp
->link_config
.orig_advertising
|= newadv
;
10258 if (netif_running(dev
)) {
10259 tg3_netif_stop(tp
);
10263 tg3_full_lock(tp
, irq_sync
);
10265 if (epause
->autoneg
)
10266 tg3_flag_set(tp
, PAUSE_AUTONEG
);
10268 tg3_flag_clear(tp
, PAUSE_AUTONEG
);
10269 if (epause
->rx_pause
)
10270 tp
->link_config
.flowctrl
|= FLOW_CTRL_RX
;
10272 tp
->link_config
.flowctrl
&= ~FLOW_CTRL_RX
;
10273 if (epause
->tx_pause
)
10274 tp
->link_config
.flowctrl
|= FLOW_CTRL_TX
;
10276 tp
->link_config
.flowctrl
&= ~FLOW_CTRL_TX
;
10278 if (netif_running(dev
)) {
10279 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
10280 err
= tg3_restart_hw(tp
, 1);
10282 tg3_netif_start(tp
);
10285 tg3_full_unlock(tp
);
10291 static int tg3_get_sset_count(struct net_device
*dev
, int sset
)
10295 return TG3_NUM_TEST
;
10297 return TG3_NUM_STATS
;
10299 return -EOPNOTSUPP
;
10303 static void tg3_get_strings(struct net_device
*dev
, u32 stringset
, u8
*buf
)
10305 switch (stringset
) {
10307 memcpy(buf
, ðtool_stats_keys
, sizeof(ethtool_stats_keys
));
10310 memcpy(buf
, ðtool_test_keys
, sizeof(ethtool_test_keys
));
10313 WARN_ON(1); /* we need a WARN() */
10318 static int tg3_set_phys_id(struct net_device
*dev
,
10319 enum ethtool_phys_id_state state
)
10321 struct tg3
*tp
= netdev_priv(dev
);
10323 if (!netif_running(tp
->dev
))
10327 case ETHTOOL_ID_ACTIVE
:
10328 return 1; /* cycle on/off once per second */
10330 case ETHTOOL_ID_ON
:
10331 tw32(MAC_LED_CTRL
, LED_CTRL_LNKLED_OVERRIDE
|
10332 LED_CTRL_1000MBPS_ON
|
10333 LED_CTRL_100MBPS_ON
|
10334 LED_CTRL_10MBPS_ON
|
10335 LED_CTRL_TRAFFIC_OVERRIDE
|
10336 LED_CTRL_TRAFFIC_BLINK
|
10337 LED_CTRL_TRAFFIC_LED
);
10340 case ETHTOOL_ID_OFF
:
10341 tw32(MAC_LED_CTRL
, LED_CTRL_LNKLED_OVERRIDE
|
10342 LED_CTRL_TRAFFIC_OVERRIDE
);
10345 case ETHTOOL_ID_INACTIVE
:
10346 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
10353 static void tg3_get_ethtool_stats(struct net_device
*dev
,
10354 struct ethtool_stats
*estats
, u64
*tmp_stats
)
10356 struct tg3
*tp
= netdev_priv(dev
);
10357 memcpy(tmp_stats
, tg3_get_estats(tp
), sizeof(tp
->estats
));
10360 static __be32
* tg3_vpd_readblock(struct tg3
*tp
)
10364 u32 offset
= 0, len
= 0;
10367 if (tg3_flag(tp
, NO_NVRAM
) || tg3_nvram_read(tp
, 0, &magic
))
10370 if (magic
== TG3_EEPROM_MAGIC
) {
10371 for (offset
= TG3_NVM_DIR_START
;
10372 offset
< TG3_NVM_DIR_END
;
10373 offset
+= TG3_NVM_DIRENT_SIZE
) {
10374 if (tg3_nvram_read(tp
, offset
, &val
))
10377 if ((val
>> TG3_NVM_DIRTYPE_SHIFT
) ==
10378 TG3_NVM_DIRTYPE_EXTVPD
)
10382 if (offset
!= TG3_NVM_DIR_END
) {
10383 len
= (val
& TG3_NVM_DIRTYPE_LENMSK
) * 4;
10384 if (tg3_nvram_read(tp
, offset
+ 4, &offset
))
10387 offset
= tg3_nvram_logical_addr(tp
, offset
);
10391 if (!offset
|| !len
) {
10392 offset
= TG3_NVM_VPD_OFF
;
10393 len
= TG3_NVM_VPD_LEN
;
10396 buf
= kmalloc(len
, GFP_KERNEL
);
10400 if (magic
== TG3_EEPROM_MAGIC
) {
10401 for (i
= 0; i
< len
; i
+= 4) {
10402 /* The data is in little-endian format in NVRAM.
10403 * Use the big-endian read routines to preserve
10404 * the byte order as it exists in NVRAM.
10406 if (tg3_nvram_read_be32(tp
, offset
+ i
, &buf
[i
/4]))
10412 unsigned int pos
= 0;
10414 ptr
= (u8
*)&buf
[0];
10415 for (i
= 0; pos
< len
&& i
< 3; i
++, pos
+= cnt
, ptr
+= cnt
) {
10416 cnt
= pci_read_vpd(tp
->pdev
, pos
,
10418 if (cnt
== -ETIMEDOUT
|| cnt
== -EINTR
)
10434 #define NVRAM_TEST_SIZE 0x100
10435 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10436 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10437 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10438 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10439 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10441 static int tg3_test_nvram(struct tg3
*tp
)
10445 int i
, j
, k
, err
= 0, size
;
10447 if (tg3_flag(tp
, NO_NVRAM
))
10450 if (tg3_nvram_read(tp
, 0, &magic
) != 0)
10453 if (magic
== TG3_EEPROM_MAGIC
)
10454 size
= NVRAM_TEST_SIZE
;
10455 else if ((magic
& TG3_EEPROM_MAGIC_FW_MSK
) == TG3_EEPROM_MAGIC_FW
) {
10456 if ((magic
& TG3_EEPROM_SB_FORMAT_MASK
) ==
10457 TG3_EEPROM_SB_FORMAT_1
) {
10458 switch (magic
& TG3_EEPROM_SB_REVISION_MASK
) {
10459 case TG3_EEPROM_SB_REVISION_0
:
10460 size
= NVRAM_SELFBOOT_FORMAT1_0_SIZE
;
10462 case TG3_EEPROM_SB_REVISION_2
:
10463 size
= NVRAM_SELFBOOT_FORMAT1_2_SIZE
;
10465 case TG3_EEPROM_SB_REVISION_3
:
10466 size
= NVRAM_SELFBOOT_FORMAT1_3_SIZE
;
10473 } else if ((magic
& TG3_EEPROM_MAGIC_HW_MSK
) == TG3_EEPROM_MAGIC_HW
)
10474 size
= NVRAM_SELFBOOT_HW_SIZE
;
10478 buf
= kmalloc(size
, GFP_KERNEL
);
10483 for (i
= 0, j
= 0; i
< size
; i
+= 4, j
++) {
10484 err
= tg3_nvram_read_be32(tp
, i
, &buf
[j
]);
10491 /* Selfboot format */
10492 magic
= be32_to_cpu(buf
[0]);
10493 if ((magic
& TG3_EEPROM_MAGIC_FW_MSK
) ==
10494 TG3_EEPROM_MAGIC_FW
) {
10495 u8
*buf8
= (u8
*) buf
, csum8
= 0;
10497 if ((magic
& TG3_EEPROM_SB_REVISION_MASK
) ==
10498 TG3_EEPROM_SB_REVISION_2
) {
10499 /* For rev 2, the csum doesn't include the MBA. */
10500 for (i
= 0; i
< TG3_EEPROM_SB_F1R2_MBA_OFF
; i
++)
10502 for (i
= TG3_EEPROM_SB_F1R2_MBA_OFF
+ 4; i
< size
; i
++)
10505 for (i
= 0; i
< size
; i
++)
10518 if ((magic
& TG3_EEPROM_MAGIC_HW_MSK
) ==
10519 TG3_EEPROM_MAGIC_HW
) {
10520 u8 data
[NVRAM_SELFBOOT_DATA_SIZE
];
10521 u8 parity
[NVRAM_SELFBOOT_DATA_SIZE
];
10522 u8
*buf8
= (u8
*) buf
;
10524 /* Separate the parity bits and the data bytes. */
10525 for (i
= 0, j
= 0, k
= 0; i
< NVRAM_SELFBOOT_HW_SIZE
; i
++) {
10526 if ((i
== 0) || (i
== 8)) {
10530 for (l
= 0, msk
= 0x80; l
< 7; l
++, msk
>>= 1)
10531 parity
[k
++] = buf8
[i
] & msk
;
10533 } else if (i
== 16) {
10537 for (l
= 0, msk
= 0x20; l
< 6; l
++, msk
>>= 1)
10538 parity
[k
++] = buf8
[i
] & msk
;
10541 for (l
= 0, msk
= 0x80; l
< 8; l
++, msk
>>= 1)
10542 parity
[k
++] = buf8
[i
] & msk
;
10545 data
[j
++] = buf8
[i
];
10549 for (i
= 0; i
< NVRAM_SELFBOOT_DATA_SIZE
; i
++) {
10550 u8 hw8
= hweight8(data
[i
]);
10552 if ((hw8
& 0x1) && parity
[i
])
10554 else if (!(hw8
& 0x1) && !parity
[i
])
10563 /* Bootstrap checksum at offset 0x10 */
10564 csum
= calc_crc((unsigned char *) buf
, 0x10);
10565 if (csum
!= le32_to_cpu(buf
[0x10/4]))
10568 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10569 csum
= calc_crc((unsigned char *) &buf
[0x74/4], 0x88);
10570 if (csum
!= le32_to_cpu(buf
[0xfc/4]))
10575 buf
= tg3_vpd_readblock(tp
);
10579 i
= pci_vpd_find_tag((u8
*)buf
, 0, TG3_NVM_VPD_LEN
,
10580 PCI_VPD_LRDT_RO_DATA
);
10582 j
= pci_vpd_lrdt_size(&((u8
*)buf
)[i
]);
10586 if (i
+ PCI_VPD_LRDT_TAG_SIZE
+ j
> TG3_NVM_VPD_LEN
)
10589 i
+= PCI_VPD_LRDT_TAG_SIZE
;
10590 j
= pci_vpd_find_info_keyword((u8
*)buf
, i
, j
,
10591 PCI_VPD_RO_KEYWORD_CHKSUM
);
10595 j
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
10597 for (i
= 0; i
<= j
; i
++)
10598 csum8
+= ((u8
*)buf
)[i
];
10612 #define TG3_SERDES_TIMEOUT_SEC 2
10613 #define TG3_COPPER_TIMEOUT_SEC 6
10615 static int tg3_test_link(struct tg3
*tp
)
10619 if (!netif_running(tp
->dev
))
10622 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)
10623 max
= TG3_SERDES_TIMEOUT_SEC
;
10625 max
= TG3_COPPER_TIMEOUT_SEC
;
10627 for (i
= 0; i
< max
; i
++) {
10628 if (netif_carrier_ok(tp
->dev
))
10631 if (msleep_interruptible(1000))
10638 /* Only test the commonly used registers */
10639 static int tg3_test_registers(struct tg3
*tp
)
10641 int i
, is_5705
, is_5750
;
10642 u32 offset
, read_mask
, write_mask
, val
, save_val
, read_val
;
10646 #define TG3_FL_5705 0x1
10647 #define TG3_FL_NOT_5705 0x2
10648 #define TG3_FL_NOT_5788 0x4
10649 #define TG3_FL_NOT_5750 0x8
10653 /* MAC Control Registers */
10654 { MAC_MODE
, TG3_FL_NOT_5705
,
10655 0x00000000, 0x00ef6f8c },
10656 { MAC_MODE
, TG3_FL_5705
,
10657 0x00000000, 0x01ef6b8c },
10658 { MAC_STATUS
, TG3_FL_NOT_5705
,
10659 0x03800107, 0x00000000 },
10660 { MAC_STATUS
, TG3_FL_5705
,
10661 0x03800100, 0x00000000 },
10662 { MAC_ADDR_0_HIGH
, 0x0000,
10663 0x00000000, 0x0000ffff },
10664 { MAC_ADDR_0_LOW
, 0x0000,
10665 0x00000000, 0xffffffff },
10666 { MAC_RX_MTU_SIZE
, 0x0000,
10667 0x00000000, 0x0000ffff },
10668 { MAC_TX_MODE
, 0x0000,
10669 0x00000000, 0x00000070 },
10670 { MAC_TX_LENGTHS
, 0x0000,
10671 0x00000000, 0x00003fff },
10672 { MAC_RX_MODE
, TG3_FL_NOT_5705
,
10673 0x00000000, 0x000007fc },
10674 { MAC_RX_MODE
, TG3_FL_5705
,
10675 0x00000000, 0x000007dc },
10676 { MAC_HASH_REG_0
, 0x0000,
10677 0x00000000, 0xffffffff },
10678 { MAC_HASH_REG_1
, 0x0000,
10679 0x00000000, 0xffffffff },
10680 { MAC_HASH_REG_2
, 0x0000,
10681 0x00000000, 0xffffffff },
10682 { MAC_HASH_REG_3
, 0x0000,
10683 0x00000000, 0xffffffff },
10685 /* Receive Data and Receive BD Initiator Control Registers. */
10686 { RCVDBDI_JUMBO_BD
+0, TG3_FL_NOT_5705
,
10687 0x00000000, 0xffffffff },
10688 { RCVDBDI_JUMBO_BD
+4, TG3_FL_NOT_5705
,
10689 0x00000000, 0xffffffff },
10690 { RCVDBDI_JUMBO_BD
+8, TG3_FL_NOT_5705
,
10691 0x00000000, 0x00000003 },
10692 { RCVDBDI_JUMBO_BD
+0xc, TG3_FL_NOT_5705
,
10693 0x00000000, 0xffffffff },
10694 { RCVDBDI_STD_BD
+0, 0x0000,
10695 0x00000000, 0xffffffff },
10696 { RCVDBDI_STD_BD
+4, 0x0000,
10697 0x00000000, 0xffffffff },
10698 { RCVDBDI_STD_BD
+8, 0x0000,
10699 0x00000000, 0xffff0002 },
10700 { RCVDBDI_STD_BD
+0xc, 0x0000,
10701 0x00000000, 0xffffffff },
10703 /* Receive BD Initiator Control Registers. */
10704 { RCVBDI_STD_THRESH
, TG3_FL_NOT_5705
,
10705 0x00000000, 0xffffffff },
10706 { RCVBDI_STD_THRESH
, TG3_FL_5705
,
10707 0x00000000, 0x000003ff },
10708 { RCVBDI_JUMBO_THRESH
, TG3_FL_NOT_5705
,
10709 0x00000000, 0xffffffff },
10711 /* Host Coalescing Control Registers. */
10712 { HOSTCC_MODE
, TG3_FL_NOT_5705
,
10713 0x00000000, 0x00000004 },
10714 { HOSTCC_MODE
, TG3_FL_5705
,
10715 0x00000000, 0x000000f6 },
10716 { HOSTCC_RXCOL_TICKS
, TG3_FL_NOT_5705
,
10717 0x00000000, 0xffffffff },
10718 { HOSTCC_RXCOL_TICKS
, TG3_FL_5705
,
10719 0x00000000, 0x000003ff },
10720 { HOSTCC_TXCOL_TICKS
, TG3_FL_NOT_5705
,
10721 0x00000000, 0xffffffff },
10722 { HOSTCC_TXCOL_TICKS
, TG3_FL_5705
,
10723 0x00000000, 0x000003ff },
10724 { HOSTCC_RXMAX_FRAMES
, TG3_FL_NOT_5705
,
10725 0x00000000, 0xffffffff },
10726 { HOSTCC_RXMAX_FRAMES
, TG3_FL_5705
| TG3_FL_NOT_5788
,
10727 0x00000000, 0x000000ff },
10728 { HOSTCC_TXMAX_FRAMES
, TG3_FL_NOT_5705
,
10729 0x00000000, 0xffffffff },
10730 { HOSTCC_TXMAX_FRAMES
, TG3_FL_5705
| TG3_FL_NOT_5788
,
10731 0x00000000, 0x000000ff },
10732 { HOSTCC_RXCOAL_TICK_INT
, TG3_FL_NOT_5705
,
10733 0x00000000, 0xffffffff },
10734 { HOSTCC_TXCOAL_TICK_INT
, TG3_FL_NOT_5705
,
10735 0x00000000, 0xffffffff },
10736 { HOSTCC_RXCOAL_MAXF_INT
, TG3_FL_NOT_5705
,
10737 0x00000000, 0xffffffff },
10738 { HOSTCC_RXCOAL_MAXF_INT
, TG3_FL_5705
| TG3_FL_NOT_5788
,
10739 0x00000000, 0x000000ff },
10740 { HOSTCC_TXCOAL_MAXF_INT
, TG3_FL_NOT_5705
,
10741 0x00000000, 0xffffffff },
10742 { HOSTCC_TXCOAL_MAXF_INT
, TG3_FL_5705
| TG3_FL_NOT_5788
,
10743 0x00000000, 0x000000ff },
10744 { HOSTCC_STAT_COAL_TICKS
, TG3_FL_NOT_5705
,
10745 0x00000000, 0xffffffff },
10746 { HOSTCC_STATS_BLK_HOST_ADDR
, TG3_FL_NOT_5705
,
10747 0x00000000, 0xffffffff },
10748 { HOSTCC_STATS_BLK_HOST_ADDR
+4, TG3_FL_NOT_5705
,
10749 0x00000000, 0xffffffff },
10750 { HOSTCC_STATUS_BLK_HOST_ADDR
, 0x0000,
10751 0x00000000, 0xffffffff },
10752 { HOSTCC_STATUS_BLK_HOST_ADDR
+4, 0x0000,
10753 0x00000000, 0xffffffff },
10754 { HOSTCC_STATS_BLK_NIC_ADDR
, 0x0000,
10755 0xffffffff, 0x00000000 },
10756 { HOSTCC_STATUS_BLK_NIC_ADDR
, 0x0000,
10757 0xffffffff, 0x00000000 },
10759 /* Buffer Manager Control Registers. */
10760 { BUFMGR_MB_POOL_ADDR
, TG3_FL_NOT_5750
,
10761 0x00000000, 0x007fff80 },
10762 { BUFMGR_MB_POOL_SIZE
, TG3_FL_NOT_5750
,
10763 0x00000000, 0x007fffff },
10764 { BUFMGR_MB_RDMA_LOW_WATER
, 0x0000,
10765 0x00000000, 0x0000003f },
10766 { BUFMGR_MB_MACRX_LOW_WATER
, 0x0000,
10767 0x00000000, 0x000001ff },
10768 { BUFMGR_MB_HIGH_WATER
, 0x0000,
10769 0x00000000, 0x000001ff },
10770 { BUFMGR_DMA_DESC_POOL_ADDR
, TG3_FL_NOT_5705
,
10771 0xffffffff, 0x00000000 },
10772 { BUFMGR_DMA_DESC_POOL_SIZE
, TG3_FL_NOT_5705
,
10773 0xffffffff, 0x00000000 },
10775 /* Mailbox Registers */
10776 { GRCMBOX_RCVSTD_PROD_IDX
+4, 0x0000,
10777 0x00000000, 0x000001ff },
10778 { GRCMBOX_RCVJUMBO_PROD_IDX
+4, TG3_FL_NOT_5705
,
10779 0x00000000, 0x000001ff },
10780 { GRCMBOX_RCVRET_CON_IDX_0
+4, 0x0000,
10781 0x00000000, 0x000007ff },
10782 { GRCMBOX_SNDHOST_PROD_IDX_0
+4, 0x0000,
10783 0x00000000, 0x000001ff },
10785 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10788 is_5705
= is_5750
= 0;
10789 if (tg3_flag(tp
, 5705_PLUS
)) {
10791 if (tg3_flag(tp
, 5750_PLUS
))
10795 for (i
= 0; reg_tbl
[i
].offset
!= 0xffff; i
++) {
10796 if (is_5705
&& (reg_tbl
[i
].flags
& TG3_FL_NOT_5705
))
10799 if (!is_5705
&& (reg_tbl
[i
].flags
& TG3_FL_5705
))
10802 if (tg3_flag(tp
, IS_5788
) &&
10803 (reg_tbl
[i
].flags
& TG3_FL_NOT_5788
))
10806 if (is_5750
&& (reg_tbl
[i
].flags
& TG3_FL_NOT_5750
))
10809 offset
= (u32
) reg_tbl
[i
].offset
;
10810 read_mask
= reg_tbl
[i
].read_mask
;
10811 write_mask
= reg_tbl
[i
].write_mask
;
10813 /* Save the original register content */
10814 save_val
= tr32(offset
);
10816 /* Determine the read-only value. */
10817 read_val
= save_val
& read_mask
;
10819 /* Write zero to the register, then make sure the read-only bits
10820 * are not changed and the read/write bits are all zeros.
10824 val
= tr32(offset
);
10826 /* Test the read-only and read/write bits. */
10827 if (((val
& read_mask
) != read_val
) || (val
& write_mask
))
10830 /* Write ones to all the bits defined by RdMask and WrMask, then
10831 * make sure the read-only bits are not changed and the
10832 * read/write bits are all ones.
10834 tw32(offset
, read_mask
| write_mask
);
10836 val
= tr32(offset
);
10838 /* Test the read-only bits. */
10839 if ((val
& read_mask
) != read_val
)
10842 /* Test the read/write bits. */
10843 if ((val
& write_mask
) != write_mask
)
10846 tw32(offset
, save_val
);
10852 if (netif_msg_hw(tp
))
10853 netdev_err(tp
->dev
,
10854 "Register test failed at offset %x\n", offset
);
10855 tw32(offset
, save_val
);
10859 static int tg3_do_mem_test(struct tg3
*tp
, u32 offset
, u32 len
)
10861 static const u32 test_pattern
[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10865 for (i
= 0; i
< ARRAY_SIZE(test_pattern
); i
++) {
10866 for (j
= 0; j
< len
; j
+= 4) {
10869 tg3_write_mem(tp
, offset
+ j
, test_pattern
[i
]);
10870 tg3_read_mem(tp
, offset
+ j
, &val
);
10871 if (val
!= test_pattern
[i
])
10878 static int tg3_test_memory(struct tg3
*tp
)
10880 static struct mem_entry
{
10883 } mem_tbl_570x
[] = {
10884 { 0x00000000, 0x00b50},
10885 { 0x00002000, 0x1c000},
10886 { 0xffffffff, 0x00000}
10887 }, mem_tbl_5705
[] = {
10888 { 0x00000100, 0x0000c},
10889 { 0x00000200, 0x00008},
10890 { 0x00004000, 0x00800},
10891 { 0x00006000, 0x01000},
10892 { 0x00008000, 0x02000},
10893 { 0x00010000, 0x0e000},
10894 { 0xffffffff, 0x00000}
10895 }, mem_tbl_5755
[] = {
10896 { 0x00000200, 0x00008},
10897 { 0x00004000, 0x00800},
10898 { 0x00006000, 0x00800},
10899 { 0x00008000, 0x02000},
10900 { 0x00010000, 0x0c000},
10901 { 0xffffffff, 0x00000}
10902 }, mem_tbl_5906
[] = {
10903 { 0x00000200, 0x00008},
10904 { 0x00004000, 0x00400},
10905 { 0x00006000, 0x00400},
10906 { 0x00008000, 0x01000},
10907 { 0x00010000, 0x01000},
10908 { 0xffffffff, 0x00000}
10909 }, mem_tbl_5717
[] = {
10910 { 0x00000200, 0x00008},
10911 { 0x00010000, 0x0a000},
10912 { 0x00020000, 0x13c00},
10913 { 0xffffffff, 0x00000}
10914 }, mem_tbl_57765
[] = {
10915 { 0x00000200, 0x00008},
10916 { 0x00004000, 0x00800},
10917 { 0x00006000, 0x09800},
10918 { 0x00010000, 0x0a000},
10919 { 0xffffffff, 0x00000}
10921 struct mem_entry
*mem_tbl
;
10925 if (tg3_flag(tp
, 5717_PLUS
))
10926 mem_tbl
= mem_tbl_5717
;
10927 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
)
10928 mem_tbl
= mem_tbl_57765
;
10929 else if (tg3_flag(tp
, 5755_PLUS
))
10930 mem_tbl
= mem_tbl_5755
;
10931 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
10932 mem_tbl
= mem_tbl_5906
;
10933 else if (tg3_flag(tp
, 5705_PLUS
))
10934 mem_tbl
= mem_tbl_5705
;
10936 mem_tbl
= mem_tbl_570x
;
10938 for (i
= 0; mem_tbl
[i
].offset
!= 0xffffffff; i
++) {
10939 err
= tg3_do_mem_test(tp
, mem_tbl
[i
].offset
, mem_tbl
[i
].len
);
10947 #define TG3_MAC_LOOPBACK 0
10948 #define TG3_PHY_LOOPBACK 1
10949 #define TG3_TSO_LOOPBACK 2
10951 #define TG3_TSO_MSS 500
10953 #define TG3_TSO_IP_HDR_LEN 20
10954 #define TG3_TSO_TCP_HDR_LEN 20
10955 #define TG3_TSO_TCP_OPT_LEN 12
10957 static const u8 tg3_tso_header
[] = {
10959 0x45, 0x00, 0x00, 0x00,
10960 0x00, 0x00, 0x40, 0x00,
10961 0x40, 0x06, 0x00, 0x00,
10962 0x0a, 0x00, 0x00, 0x01,
10963 0x0a, 0x00, 0x00, 0x02,
10964 0x0d, 0x00, 0xe0, 0x00,
10965 0x00, 0x00, 0x01, 0x00,
10966 0x00, 0x00, 0x02, 0x00,
10967 0x80, 0x10, 0x10, 0x00,
10968 0x14, 0x09, 0x00, 0x00,
10969 0x01, 0x01, 0x08, 0x0a,
10970 0x11, 0x11, 0x11, 0x11,
10971 0x11, 0x11, 0x11, 0x11,
10974 static int tg3_run_loopback(struct tg3
*tp
, u32 pktsz
, int loopback_mode
)
10976 u32 mac_mode
, rx_start_idx
, rx_idx
, tx_idx
, opaque_key
;
10977 u32 base_flags
= 0, mss
= 0, desc_idx
, coal_now
, data_off
, val
;
10978 struct sk_buff
*skb
, *rx_skb
;
10981 int num_pkts
, tx_len
, rx_len
, i
, err
;
10982 struct tg3_rx_buffer_desc
*desc
;
10983 struct tg3_napi
*tnapi
, *rnapi
;
10984 struct tg3_rx_prodring_set
*tpr
= &tp
->napi
[0].prodring
;
10986 tnapi
= &tp
->napi
[0];
10987 rnapi
= &tp
->napi
[0];
10988 if (tp
->irq_cnt
> 1) {
10989 if (tg3_flag(tp
, ENABLE_RSS
))
10990 rnapi
= &tp
->napi
[1];
10991 if (tg3_flag(tp
, ENABLE_TSS
))
10992 tnapi
= &tp
->napi
[1];
10994 coal_now
= tnapi
->coal_now
| rnapi
->coal_now
;
10996 if (loopback_mode
== TG3_MAC_LOOPBACK
) {
10997 /* HW errata - mac loopback fails in some cases on 5780.
10998 * Normal traffic and PHY loopback are not affected by
10999 * errata. Also, the MAC loopback test is deprecated for
11000 * all newer ASIC revisions.
11002 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5780
||
11003 tg3_flag(tp
, CPMU_PRESENT
))
11006 mac_mode
= tp
->mac_mode
&
11007 ~(MAC_MODE_PORT_MODE_MASK
| MAC_MODE_HALF_DUPLEX
);
11008 mac_mode
|= MAC_MODE_PORT_INT_LPBACK
;
11009 if (!tg3_flag(tp
, 5705_PLUS
))
11010 mac_mode
|= MAC_MODE_LINK_POLARITY
;
11011 if (tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)
11012 mac_mode
|= MAC_MODE_PORT_MODE_MII
;
11014 mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
11015 tw32(MAC_MODE
, mac_mode
);
11017 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
11018 tg3_phy_fet_toggle_apd(tp
, false);
11019 val
= BMCR_LOOPBACK
| BMCR_FULLDPLX
| BMCR_SPEED100
;
11021 val
= BMCR_LOOPBACK
| BMCR_FULLDPLX
| BMCR_SPEED1000
;
11023 tg3_phy_toggle_automdix(tp
, 0);
11025 tg3_writephy(tp
, MII_BMCR
, val
);
11028 mac_mode
= tp
->mac_mode
&
11029 ~(MAC_MODE_PORT_MODE_MASK
| MAC_MODE_HALF_DUPLEX
);
11030 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
11031 tg3_writephy(tp
, MII_TG3_FET_PTEST
,
11032 MII_TG3_FET_PTEST_FRC_TX_LINK
|
11033 MII_TG3_FET_PTEST_FRC_TX_LOCK
);
11034 /* The write needs to be flushed for the AC131 */
11035 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
)
11036 tg3_readphy(tp
, MII_TG3_FET_PTEST
, &val
);
11037 mac_mode
|= MAC_MODE_PORT_MODE_MII
;
11039 mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
11041 /* reset to prevent losing 1st rx packet intermittently */
11042 if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) {
11043 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
11045 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
11047 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
) {
11048 u32 masked_phy_id
= tp
->phy_id
& TG3_PHY_ID_MASK
;
11049 if (masked_phy_id
== TG3_PHY_ID_BCM5401
)
11050 mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
11051 else if (masked_phy_id
== TG3_PHY_ID_BCM5411
)
11052 mac_mode
|= MAC_MODE_LINK_POLARITY
;
11053 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
11054 MII_TG3_EXT_CTRL_LNK3_LED_MODE
);
11056 tw32(MAC_MODE
, mac_mode
);
11058 /* Wait for link */
11059 for (i
= 0; i
< 100; i
++) {
11060 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
11069 skb
= netdev_alloc_skb(tp
->dev
, tx_len
);
11073 tx_data
= skb_put(skb
, tx_len
);
11074 memcpy(tx_data
, tp
->dev
->dev_addr
, 6);
11075 memset(tx_data
+ 6, 0x0, 8);
11077 tw32(MAC_RX_MTU_SIZE
, tx_len
+ ETH_FCS_LEN
);
11079 if (loopback_mode
== TG3_TSO_LOOPBACK
) {
11080 struct iphdr
*iph
= (struct iphdr
*)&tx_data
[ETH_HLEN
];
11082 u32 hdr_len
= TG3_TSO_IP_HDR_LEN
+ TG3_TSO_TCP_HDR_LEN
+
11083 TG3_TSO_TCP_OPT_LEN
;
11085 memcpy(tx_data
+ ETH_ALEN
* 2, tg3_tso_header
,
11086 sizeof(tg3_tso_header
));
11089 val
= tx_len
- ETH_ALEN
* 2 - sizeof(tg3_tso_header
);
11090 num_pkts
= DIV_ROUND_UP(val
, TG3_TSO_MSS
);
11092 /* Set the total length field in the IP header */
11093 iph
->tot_len
= htons((u16
)(mss
+ hdr_len
));
11095 base_flags
= (TXD_FLAG_CPU_PRE_DMA
|
11096 TXD_FLAG_CPU_POST_DMA
);
11098 if (tg3_flag(tp
, HW_TSO_1
) ||
11099 tg3_flag(tp
, HW_TSO_2
) ||
11100 tg3_flag(tp
, HW_TSO_3
)) {
11102 val
= ETH_HLEN
+ TG3_TSO_IP_HDR_LEN
;
11103 th
= (struct tcphdr
*)&tx_data
[val
];
11106 base_flags
|= TXD_FLAG_TCPUDP_CSUM
;
11108 if (tg3_flag(tp
, HW_TSO_3
)) {
11109 mss
|= (hdr_len
& 0xc) << 12;
11110 if (hdr_len
& 0x10)
11111 base_flags
|= 0x00000010;
11112 base_flags
|= (hdr_len
& 0x3e0) << 5;
11113 } else if (tg3_flag(tp
, HW_TSO_2
))
11114 mss
|= hdr_len
<< 9;
11115 else if (tg3_flag(tp
, HW_TSO_1
) ||
11116 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
11117 mss
|= (TG3_TSO_TCP_OPT_LEN
<< 9);
11119 base_flags
|= (TG3_TSO_TCP_OPT_LEN
<< 10);
11122 data_off
= ETH_ALEN
* 2 + sizeof(tg3_tso_header
);
11125 data_off
= ETH_HLEN
;
11128 for (i
= data_off
; i
< tx_len
; i
++)
11129 tx_data
[i
] = (u8
) (i
& 0xff);
11131 map
= pci_map_single(tp
->pdev
, skb
->data
, tx_len
, PCI_DMA_TODEVICE
);
11132 if (pci_dma_mapping_error(tp
->pdev
, map
)) {
11133 dev_kfree_skb(skb
);
11137 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
11142 rx_start_idx
= rnapi
->hw_status
->idx
[0].rx_producer
;
11144 tg3_set_txd(tnapi
, tnapi
->tx_prod
, map
, tx_len
,
11145 base_flags
, (mss
<< 1) | 1);
11149 tw32_tx_mbox(tnapi
->prodmbox
, tnapi
->tx_prod
);
11150 tr32_mailbox(tnapi
->prodmbox
);
11154 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11155 for (i
= 0; i
< 35; i
++) {
11156 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
11161 tx_idx
= tnapi
->hw_status
->idx
[0].tx_consumer
;
11162 rx_idx
= rnapi
->hw_status
->idx
[0].rx_producer
;
11163 if ((tx_idx
== tnapi
->tx_prod
) &&
11164 (rx_idx
== (rx_start_idx
+ num_pkts
)))
11168 pci_unmap_single(tp
->pdev
, map
, tx_len
, PCI_DMA_TODEVICE
);
11169 dev_kfree_skb(skb
);
11171 if (tx_idx
!= tnapi
->tx_prod
)
11174 if (rx_idx
!= rx_start_idx
+ num_pkts
)
11178 while (rx_idx
!= rx_start_idx
) {
11179 desc
= &rnapi
->rx_rcb
[rx_start_idx
++];
11180 desc_idx
= desc
->opaque
& RXD_OPAQUE_INDEX_MASK
;
11181 opaque_key
= desc
->opaque
& RXD_OPAQUE_RING_MASK
;
11183 if ((desc
->err_vlan
& RXD_ERR_MASK
) != 0 &&
11184 (desc
->err_vlan
!= RXD_ERR_ODD_NIBBLE_RCVD_MII
))
11187 rx_len
= ((desc
->idx_len
& RXD_LEN_MASK
) >> RXD_LEN_SHIFT
)
11190 if (loopback_mode
!= TG3_TSO_LOOPBACK
) {
11191 if (rx_len
!= tx_len
)
11194 if (pktsz
<= TG3_RX_STD_DMA_SZ
- ETH_FCS_LEN
) {
11195 if (opaque_key
!= RXD_OPAQUE_RING_STD
)
11198 if (opaque_key
!= RXD_OPAQUE_RING_JUMBO
)
11201 } else if ((desc
->type_flags
& RXD_FLAG_TCPUDP_CSUM
) &&
11202 (desc
->ip_tcp_csum
& RXD_TCPCSUM_MASK
)
11203 >> RXD_TCPCSUM_SHIFT
!= 0xffff) {
11207 if (opaque_key
== RXD_OPAQUE_RING_STD
) {
11208 rx_skb
= tpr
->rx_std_buffers
[desc_idx
].skb
;
11209 map
= dma_unmap_addr(&tpr
->rx_std_buffers
[desc_idx
],
11211 } else if (opaque_key
== RXD_OPAQUE_RING_JUMBO
) {
11212 rx_skb
= tpr
->rx_jmb_buffers
[desc_idx
].skb
;
11213 map
= dma_unmap_addr(&tpr
->rx_jmb_buffers
[desc_idx
],
11218 pci_dma_sync_single_for_cpu(tp
->pdev
, map
, rx_len
,
11219 PCI_DMA_FROMDEVICE
);
11221 for (i
= data_off
; i
< rx_len
; i
++, val
++) {
11222 if (*(rx_skb
->data
+ i
) != (u8
) (val
& 0xff))
11229 /* tg3_free_rings will unmap and free the rx_skb */
/* Per-test failure bits for tg3_test_loopback(); shifted by the MAC/PHY
 * loopback-mode shifts below to pack all results into one status word.
 */
#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4

#define TG3_MAC_LOOPBACK_SHIFT		0
#define TG3_PHY_LOOPBACK_SHIFT		4
/* All failure bits set in both the MAC and PHY nibbles. */
#define TG3_LOOPBACK_FAILED		0x00000077
11242 static int tg3_test_loopback(struct tg3
*tp
)
11245 u32 eee_cap
, cpmuctrl
= 0;
11247 if (!netif_running(tp
->dev
))
11248 return TG3_LOOPBACK_FAILED
;
11250 eee_cap
= tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
;
11251 tp
->phy_flags
&= ~TG3_PHYFLG_EEE_CAP
;
11253 err
= tg3_reset_hw(tp
, 1);
11255 err
= TG3_LOOPBACK_FAILED
;
11259 if (tg3_flag(tp
, ENABLE_RSS
)) {
11262 /* Reroute all rx packets to the 1st queue */
11263 for (i
= MAC_RSS_INDIR_TBL_0
;
11264 i
< MAC_RSS_INDIR_TBL_0
+ TG3_RSS_INDIR_TBL_SIZE
; i
+= 4)
11268 /* Turn off gphy autopowerdown. */
11269 if (tp
->phy_flags
& TG3_PHYFLG_ENABLE_APD
)
11270 tg3_phy_toggle_apd(tp
, false);
11272 if (tg3_flag(tp
, CPMU_PRESENT
)) {
11276 tw32(TG3_CPMU_MUTEX_REQ
, CPMU_MUTEX_REQ_DRIVER
);
11278 /* Wait for up to 40 microseconds to acquire lock. */
11279 for (i
= 0; i
< 4; i
++) {
11280 status
= tr32(TG3_CPMU_MUTEX_GNT
);
11281 if (status
== CPMU_MUTEX_GNT_DRIVER
)
11286 if (status
!= CPMU_MUTEX_GNT_DRIVER
) {
11287 err
= TG3_LOOPBACK_FAILED
;
11291 /* Turn off link-based power management. */
11292 cpmuctrl
= tr32(TG3_CPMU_CTRL
);
11293 tw32(TG3_CPMU_CTRL
,
11294 cpmuctrl
& ~(CPMU_CTRL_LINK_SPEED_MODE
|
11295 CPMU_CTRL_LINK_AWARE_MODE
));
11298 if (tg3_run_loopback(tp
, ETH_FRAME_LEN
, TG3_MAC_LOOPBACK
))
11299 err
|= TG3_STD_LOOPBACK_FAILED
<< TG3_MAC_LOOPBACK_SHIFT
;
11301 if (tg3_flag(tp
, JUMBO_RING_ENABLE
) &&
11302 tg3_run_loopback(tp
, 9000 + ETH_HLEN
, TG3_MAC_LOOPBACK
))
11303 err
|= TG3_JMB_LOOPBACK_FAILED
<< TG3_MAC_LOOPBACK_SHIFT
;
11305 if (tg3_flag(tp
, CPMU_PRESENT
)) {
11306 tw32(TG3_CPMU_CTRL
, cpmuctrl
);
11308 /* Release the mutex */
11309 tw32(TG3_CPMU_MUTEX_GNT
, CPMU_MUTEX_GNT_DRIVER
);
11312 if (!(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
11313 !tg3_flag(tp
, USE_PHYLIB
)) {
11314 if (tg3_run_loopback(tp
, ETH_FRAME_LEN
, TG3_PHY_LOOPBACK
))
11315 err
|= TG3_STD_LOOPBACK_FAILED
<<
11316 TG3_PHY_LOOPBACK_SHIFT
;
11317 if (tg3_flag(tp
, TSO_CAPABLE
) &&
11318 tg3_run_loopback(tp
, ETH_FRAME_LEN
, TG3_TSO_LOOPBACK
))
11319 err
|= TG3_TSO_LOOPBACK_FAILED
<<
11320 TG3_PHY_LOOPBACK_SHIFT
;
11321 if (tg3_flag(tp
, JUMBO_RING_ENABLE
) &&
11322 tg3_run_loopback(tp
, 9000 + ETH_HLEN
, TG3_PHY_LOOPBACK
))
11323 err
|= TG3_JMB_LOOPBACK_FAILED
<<
11324 TG3_PHY_LOOPBACK_SHIFT
;
11327 /* Re-enable gphy autopowerdown. */
11328 if (tp
->phy_flags
& TG3_PHYFLG_ENABLE_APD
)
11329 tg3_phy_toggle_apd(tp
, true);
11332 tp
->phy_flags
|= eee_cap
;
11337 static void tg3_self_test(struct net_device
*dev
, struct ethtool_test
*etest
,
11340 struct tg3
*tp
= netdev_priv(dev
);
11342 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
11345 memset(data
, 0, sizeof(u64
) * TG3_NUM_TEST
);
11347 if (tg3_test_nvram(tp
) != 0) {
11348 etest
->flags
|= ETH_TEST_FL_FAILED
;
11351 if (tg3_test_link(tp
) != 0) {
11352 etest
->flags
|= ETH_TEST_FL_FAILED
;
11355 if (etest
->flags
& ETH_TEST_FL_OFFLINE
) {
11356 int err
, err2
= 0, irq_sync
= 0;
11358 if (netif_running(dev
)) {
11360 tg3_netif_stop(tp
);
11364 tg3_full_lock(tp
, irq_sync
);
11366 tg3_halt(tp
, RESET_KIND_SUSPEND
, 1);
11367 err
= tg3_nvram_lock(tp
);
11368 tg3_halt_cpu(tp
, RX_CPU_BASE
);
11369 if (!tg3_flag(tp
, 5705_PLUS
))
11370 tg3_halt_cpu(tp
, TX_CPU_BASE
);
11372 tg3_nvram_unlock(tp
);
11374 if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)
11377 if (tg3_test_registers(tp
) != 0) {
11378 etest
->flags
|= ETH_TEST_FL_FAILED
;
11381 if (tg3_test_memory(tp
) != 0) {
11382 etest
->flags
|= ETH_TEST_FL_FAILED
;
11385 if ((data
[4] = tg3_test_loopback(tp
)) != 0)
11386 etest
->flags
|= ETH_TEST_FL_FAILED
;
11388 tg3_full_unlock(tp
);
11390 if (tg3_test_interrupt(tp
) != 0) {
11391 etest
->flags
|= ETH_TEST_FL_FAILED
;
11395 tg3_full_lock(tp
, 0);
11397 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
11398 if (netif_running(dev
)) {
11399 tg3_flag_set(tp
, INIT_COMPLETE
);
11400 err2
= tg3_restart_hw(tp
, 1);
11402 tg3_netif_start(tp
);
11405 tg3_full_unlock(tp
);
11407 if (irq_sync
&& !err2
)
11410 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
11411 tg3_power_down(tp
);
11415 static int tg3_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
11417 struct mii_ioctl_data
*data
= if_mii(ifr
);
11418 struct tg3
*tp
= netdev_priv(dev
);
11421 if (tg3_flag(tp
, USE_PHYLIB
)) {
11422 struct phy_device
*phydev
;
11423 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
11425 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
11426 return phy_mii_ioctl(phydev
, ifr
, cmd
);
11431 data
->phy_id
= tp
->phy_addr
;
11434 case SIOCGMIIREG
: {
11437 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
11438 break; /* We have no PHY */
11440 if (!netif_running(dev
))
11443 spin_lock_bh(&tp
->lock
);
11444 err
= tg3_readphy(tp
, data
->reg_num
& 0x1f, &mii_regval
);
11445 spin_unlock_bh(&tp
->lock
);
11447 data
->val_out
= mii_regval
;
11453 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
11454 break; /* We have no PHY */
11456 if (!netif_running(dev
))
11459 spin_lock_bh(&tp
->lock
);
11460 err
= tg3_writephy(tp
, data
->reg_num
& 0x1f, data
->val_in
);
11461 spin_unlock_bh(&tp
->lock
);
11469 return -EOPNOTSUPP
;
11472 static int tg3_get_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*ec
)
11474 struct tg3
*tp
= netdev_priv(dev
);
11476 memcpy(ec
, &tp
->coal
, sizeof(*ec
));
11480 static int tg3_set_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*ec
)
11482 struct tg3
*tp
= netdev_priv(dev
);
11483 u32 max_rxcoal_tick_int
= 0, max_txcoal_tick_int
= 0;
11484 u32 max_stat_coal_ticks
= 0, min_stat_coal_ticks
= 0;
11486 if (!tg3_flag(tp
, 5705_PLUS
)) {
11487 max_rxcoal_tick_int
= MAX_RXCOAL_TICK_INT
;
11488 max_txcoal_tick_int
= MAX_TXCOAL_TICK_INT
;
11489 max_stat_coal_ticks
= MAX_STAT_COAL_TICKS
;
11490 min_stat_coal_ticks
= MIN_STAT_COAL_TICKS
;
11493 if ((ec
->rx_coalesce_usecs
> MAX_RXCOL_TICKS
) ||
11494 (ec
->tx_coalesce_usecs
> MAX_TXCOL_TICKS
) ||
11495 (ec
->rx_max_coalesced_frames
> MAX_RXMAX_FRAMES
) ||
11496 (ec
->tx_max_coalesced_frames
> MAX_TXMAX_FRAMES
) ||
11497 (ec
->rx_coalesce_usecs_irq
> max_rxcoal_tick_int
) ||
11498 (ec
->tx_coalesce_usecs_irq
> max_txcoal_tick_int
) ||
11499 (ec
->rx_max_coalesced_frames_irq
> MAX_RXCOAL_MAXF_INT
) ||
11500 (ec
->tx_max_coalesced_frames_irq
> MAX_TXCOAL_MAXF_INT
) ||
11501 (ec
->stats_block_coalesce_usecs
> max_stat_coal_ticks
) ||
11502 (ec
->stats_block_coalesce_usecs
< min_stat_coal_ticks
))
11505 /* No rx interrupts will be generated if both are zero */
11506 if ((ec
->rx_coalesce_usecs
== 0) &&
11507 (ec
->rx_max_coalesced_frames
== 0))
11510 /* No tx interrupts will be generated if both are zero */
11511 if ((ec
->tx_coalesce_usecs
== 0) &&
11512 (ec
->tx_max_coalesced_frames
== 0))
11515 /* Only copy relevant parameters, ignore all others. */
11516 tp
->coal
.rx_coalesce_usecs
= ec
->rx_coalesce_usecs
;
11517 tp
->coal
.tx_coalesce_usecs
= ec
->tx_coalesce_usecs
;
11518 tp
->coal
.rx_max_coalesced_frames
= ec
->rx_max_coalesced_frames
;
11519 tp
->coal
.tx_max_coalesced_frames
= ec
->tx_max_coalesced_frames
;
11520 tp
->coal
.rx_coalesce_usecs_irq
= ec
->rx_coalesce_usecs_irq
;
11521 tp
->coal
.tx_coalesce_usecs_irq
= ec
->tx_coalesce_usecs_irq
;
11522 tp
->coal
.rx_max_coalesced_frames_irq
= ec
->rx_max_coalesced_frames_irq
;
11523 tp
->coal
.tx_max_coalesced_frames_irq
= ec
->tx_max_coalesced_frames_irq
;
11524 tp
->coal
.stats_block_coalesce_usecs
= ec
->stats_block_coalesce_usecs
;
11526 if (netif_running(dev
)) {
11527 tg3_full_lock(tp
, 0);
11528 __tg3_set_coalesce(tp
, &tp
->coal
);
11529 tg3_full_unlock(tp
);
11534 static const struct ethtool_ops tg3_ethtool_ops
= {
11535 .get_settings
= tg3_get_settings
,
11536 .set_settings
= tg3_set_settings
,
11537 .get_drvinfo
= tg3_get_drvinfo
,
11538 .get_regs_len
= tg3_get_regs_len
,
11539 .get_regs
= tg3_get_regs
,
11540 .get_wol
= tg3_get_wol
,
11541 .set_wol
= tg3_set_wol
,
11542 .get_msglevel
= tg3_get_msglevel
,
11543 .set_msglevel
= tg3_set_msglevel
,
11544 .nway_reset
= tg3_nway_reset
,
11545 .get_link
= ethtool_op_get_link
,
11546 .get_eeprom_len
= tg3_get_eeprom_len
,
11547 .get_eeprom
= tg3_get_eeprom
,
11548 .set_eeprom
= tg3_set_eeprom
,
11549 .get_ringparam
= tg3_get_ringparam
,
11550 .set_ringparam
= tg3_set_ringparam
,
11551 .get_pauseparam
= tg3_get_pauseparam
,
11552 .set_pauseparam
= tg3_set_pauseparam
,
11553 .self_test
= tg3_self_test
,
11554 .get_strings
= tg3_get_strings
,
11555 .set_phys_id
= tg3_set_phys_id
,
11556 .get_ethtool_stats
= tg3_get_ethtool_stats
,
11557 .get_coalesce
= tg3_get_coalesce
,
11558 .set_coalesce
= tg3_set_coalesce
,
11559 .get_sset_count
= tg3_get_sset_count
,
11562 static void __devinit
tg3_get_eeprom_size(struct tg3
*tp
)
11564 u32 cursize
, val
, magic
;
11566 tp
->nvram_size
= EEPROM_CHIP_SIZE
;
11568 if (tg3_nvram_read(tp
, 0, &magic
) != 0)
11571 if ((magic
!= TG3_EEPROM_MAGIC
) &&
11572 ((magic
& TG3_EEPROM_MAGIC_FW_MSK
) != TG3_EEPROM_MAGIC_FW
) &&
11573 ((magic
& TG3_EEPROM_MAGIC_HW_MSK
) != TG3_EEPROM_MAGIC_HW
))
11577 * Size the chip by reading offsets at increasing powers of two.
11578 * When we encounter our validation signature, we know the addressing
11579 * has wrapped around, and thus have our chip size.
11583 while (cursize
< tp
->nvram_size
) {
11584 if (tg3_nvram_read(tp
, cursize
, &val
) != 0)
11593 tp
->nvram_size
= cursize
;
11596 static void __devinit
tg3_get_nvram_size(struct tg3
*tp
)
11600 if (tg3_flag(tp
, NO_NVRAM
) || tg3_nvram_read(tp
, 0, &val
) != 0)
11603 /* Selfboot format */
11604 if (val
!= TG3_EEPROM_MAGIC
) {
11605 tg3_get_eeprom_size(tp
);
11609 if (tg3_nvram_read(tp
, 0xf0, &val
) == 0) {
11611 /* This is confusing. We want to operate on the
11612 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11613 * call will read from NVRAM and byteswap the data
11614 * according to the byteswapping settings for all
11615 * other register accesses. This ensures the data we
11616 * want will always reside in the lower 16-bits.
11617 * However, the data in NVRAM is in LE format, which
11618 * means the data from the NVRAM read will always be
11619 * opposite the endianness of the CPU. The 16-bit
11620 * byteswap then brings the data to CPU endianness.
11622 tp
->nvram_size
= swab16((u16
)(val
& 0x0000ffff)) * 1024;
11626 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
11629 static void __devinit
tg3_get_nvram_info(struct tg3
*tp
)
11633 nvcfg1
= tr32(NVRAM_CFG1
);
11634 if (nvcfg1
& NVRAM_CFG1_FLASHIF_ENAB
) {
11635 tg3_flag_set(tp
, FLASH
);
11637 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
11638 tw32(NVRAM_CFG1
, nvcfg1
);
11641 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
||
11642 tg3_flag(tp
, 5780_CLASS
)) {
11643 switch (nvcfg1
& NVRAM_CFG1_VENDOR_MASK
) {
11644 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED
:
11645 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
11646 tp
->nvram_pagesize
= ATMEL_AT45DB0X1B_PAGE_SIZE
;
11647 tg3_flag_set(tp
, NVRAM_BUFFERED
);
11649 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED
:
11650 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
11651 tp
->nvram_pagesize
= ATMEL_AT25F512_PAGE_SIZE
;
11653 case FLASH_VENDOR_ATMEL_EEPROM
:
11654 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
11655 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
11656 tg3_flag_set(tp
, NVRAM_BUFFERED
);
11658 case FLASH_VENDOR_ST
:
11659 tp
->nvram_jedecnum
= JEDEC_ST
;
11660 tp
->nvram_pagesize
= ST_M45PEX0_PAGE_SIZE
;
11661 tg3_flag_set(tp
, NVRAM_BUFFERED
);
11663 case FLASH_VENDOR_SAIFUN
:
11664 tp
->nvram_jedecnum
= JEDEC_SAIFUN
;
11665 tp
->nvram_pagesize
= SAIFUN_SA25F0XX_PAGE_SIZE
;
11667 case FLASH_VENDOR_SST_SMALL
:
11668 case FLASH_VENDOR_SST_LARGE
:
11669 tp
->nvram_jedecnum
= JEDEC_SST
;
11670 tp
->nvram_pagesize
= SST_25VF0X0_PAGE_SIZE
;
11674 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
11675 tp
->nvram_pagesize
= ATMEL_AT45DB0X1B_PAGE_SIZE
;
11676 tg3_flag_set(tp
, NVRAM_BUFFERED
);
11680 static void __devinit
tg3_nvram_get_pagesize(struct tg3
*tp
, u32 nvmcfg1
)
11682 switch (nvmcfg1
& NVRAM_CFG1_5752PAGE_SIZE_MASK
) {
11683 case FLASH_5752PAGE_SIZE_256
:
11684 tp
->nvram_pagesize
= 256;
11686 case FLASH_5752PAGE_SIZE_512
:
11687 tp
->nvram_pagesize
= 512;
11689 case FLASH_5752PAGE_SIZE_1K
:
11690 tp
->nvram_pagesize
= 1024;
11692 case FLASH_5752PAGE_SIZE_2K
:
11693 tp
->nvram_pagesize
= 2048;
11695 case FLASH_5752PAGE_SIZE_4K
:
11696 tp
->nvram_pagesize
= 4096;
11698 case FLASH_5752PAGE_SIZE_264
:
11699 tp
->nvram_pagesize
= 264;
11701 case FLASH_5752PAGE_SIZE_528
:
11702 tp
->nvram_pagesize
= 528;
11707 static void __devinit
tg3_get_5752_nvram_info(struct tg3
*tp
)
11711 nvcfg1
= tr32(NVRAM_CFG1
);
11713 /* NVRAM protection for TPM */
11714 if (nvcfg1
& (1 << 27))
11715 tg3_flag_set(tp
, PROTECTED_NVRAM
);
11717 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
11718 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ
:
11719 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ
:
11720 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
11721 tg3_flag_set(tp
, NVRAM_BUFFERED
);
11723 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
11724 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
11725 tg3_flag_set(tp
, NVRAM_BUFFERED
);
11726 tg3_flag_set(tp
, FLASH
);
11728 case FLASH_5752VENDOR_ST_M45PE10
:
11729 case FLASH_5752VENDOR_ST_M45PE20
:
11730 case FLASH_5752VENDOR_ST_M45PE40
:
11731 tp
->nvram_jedecnum
= JEDEC_ST
;
11732 tg3_flag_set(tp
, NVRAM_BUFFERED
);
11733 tg3_flag_set(tp
, FLASH
);
11737 if (tg3_flag(tp
, FLASH
)) {
11738 tg3_nvram_get_pagesize(tp
, nvcfg1
);
11740 /* For eeprom, set pagesize to maximum eeprom size */
11741 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
11743 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
11744 tw32(NVRAM_CFG1
, nvcfg1
);
11748 static void __devinit
tg3_get_5755_nvram_info(struct tg3
*tp
)
11750 u32 nvcfg1
, protect
= 0;
11752 nvcfg1
= tr32(NVRAM_CFG1
);
11754 /* NVRAM protection for TPM */
11755 if (nvcfg1
& (1 << 27)) {
11756 tg3_flag_set(tp
, PROTECTED_NVRAM
);
11760 nvcfg1
&= NVRAM_CFG1_5752VENDOR_MASK
;
11762 case FLASH_5755VENDOR_ATMEL_FLASH_1
:
11763 case FLASH_5755VENDOR_ATMEL_FLASH_2
:
11764 case FLASH_5755VENDOR_ATMEL_FLASH_3
:
11765 case FLASH_5755VENDOR_ATMEL_FLASH_5
:
11766 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
11767 tg3_flag_set(tp
, NVRAM_BUFFERED
);
11768 tg3_flag_set(tp
, FLASH
);
11769 tp
->nvram_pagesize
= 264;
11770 if (nvcfg1
== FLASH_5755VENDOR_ATMEL_FLASH_1
||
11771 nvcfg1
== FLASH_5755VENDOR_ATMEL_FLASH_5
)
11772 tp
->nvram_size
= (protect
? 0x3e200 :
11773 TG3_NVRAM_SIZE_512KB
);
11774 else if (nvcfg1
== FLASH_5755VENDOR_ATMEL_FLASH_2
)
11775 tp
->nvram_size
= (protect
? 0x1f200 :
11776 TG3_NVRAM_SIZE_256KB
);
11778 tp
->nvram_size
= (protect
? 0x1f200 :
11779 TG3_NVRAM_SIZE_128KB
);
11781 case FLASH_5752VENDOR_ST_M45PE10
:
11782 case FLASH_5752VENDOR_ST_M45PE20
:
11783 case FLASH_5752VENDOR_ST_M45PE40
:
11784 tp
->nvram_jedecnum
= JEDEC_ST
;
11785 tg3_flag_set(tp
, NVRAM_BUFFERED
);
11786 tg3_flag_set(tp
, FLASH
);
11787 tp
->nvram_pagesize
= 256;
11788 if (nvcfg1
== FLASH_5752VENDOR_ST_M45PE10
)
11789 tp
->nvram_size
= (protect
?
11790 TG3_NVRAM_SIZE_64KB
:
11791 TG3_NVRAM_SIZE_128KB
);
11792 else if (nvcfg1
== FLASH_5752VENDOR_ST_M45PE20
)
11793 tp
->nvram_size
= (protect
?
11794 TG3_NVRAM_SIZE_64KB
:
11795 TG3_NVRAM_SIZE_256KB
);
11797 tp
->nvram_size
= (protect
?
11798 TG3_NVRAM_SIZE_128KB
:
11799 TG3_NVRAM_SIZE_512KB
);
11804 static void __devinit
tg3_get_5787_nvram_info(struct tg3
*tp
)
11808 nvcfg1
= tr32(NVRAM_CFG1
);
11810 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
11811 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ
:
11812 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ
:
11813 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ
:
11814 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ
:
11815 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
11816 tg3_flag_set(tp
, NVRAM_BUFFERED
);
11817 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
11819 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
11820 tw32(NVRAM_CFG1
, nvcfg1
);
11822 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
11823 case FLASH_5755VENDOR_ATMEL_FLASH_1
:
11824 case FLASH_5755VENDOR_ATMEL_FLASH_2
:
11825 case FLASH_5755VENDOR_ATMEL_FLASH_3
:
11826 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
11827 tg3_flag_set(tp
, NVRAM_BUFFERED
);
11828 tg3_flag_set(tp
, FLASH
);
11829 tp
->nvram_pagesize
= 264;
11831 case FLASH_5752VENDOR_ST_M45PE10
:
11832 case FLASH_5752VENDOR_ST_M45PE20
:
11833 case FLASH_5752VENDOR_ST_M45PE40
:
11834 tp
->nvram_jedecnum
= JEDEC_ST
;
11835 tg3_flag_set(tp
, NVRAM_BUFFERED
);
11836 tg3_flag_set(tp
, FLASH
);
11837 tp
->nvram_pagesize
= 256;
11842 static void __devinit
tg3_get_5761_nvram_info(struct tg3
*tp
)
11844 u32 nvcfg1
, protect
= 0;
11846 nvcfg1
= tr32(NVRAM_CFG1
);
11848 /* NVRAM protection for TPM */
11849 if (nvcfg1
& (1 << 27)) {
11850 tg3_flag_set(tp
, PROTECTED_NVRAM
);
11854 nvcfg1
&= NVRAM_CFG1_5752VENDOR_MASK
;
11856 case FLASH_5761VENDOR_ATMEL_ADB021D
:
11857 case FLASH_5761VENDOR_ATMEL_ADB041D
:
11858 case FLASH_5761VENDOR_ATMEL_ADB081D
:
11859 case FLASH_5761VENDOR_ATMEL_ADB161D
:
11860 case FLASH_5761VENDOR_ATMEL_MDB021D
:
11861 case FLASH_5761VENDOR_ATMEL_MDB041D
:
11862 case FLASH_5761VENDOR_ATMEL_MDB081D
:
11863 case FLASH_5761VENDOR_ATMEL_MDB161D
:
11864 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
11865 tg3_flag_set(tp
, NVRAM_BUFFERED
);
11866 tg3_flag_set(tp
, FLASH
);
11867 tg3_flag_set(tp
, NO_NVRAM_ADDR_TRANS
);
11868 tp
->nvram_pagesize
= 256;
11870 case FLASH_5761VENDOR_ST_A_M45PE20
:
11871 case FLASH_5761VENDOR_ST_A_M45PE40
:
11872 case FLASH_5761VENDOR_ST_A_M45PE80
:
11873 case FLASH_5761VENDOR_ST_A_M45PE16
:
11874 case FLASH_5761VENDOR_ST_M_M45PE20
:
11875 case FLASH_5761VENDOR_ST_M_M45PE40
:
11876 case FLASH_5761VENDOR_ST_M_M45PE80
:
11877 case FLASH_5761VENDOR_ST_M_M45PE16
:
11878 tp
->nvram_jedecnum
= JEDEC_ST
;
11879 tg3_flag_set(tp
, NVRAM_BUFFERED
);
11880 tg3_flag_set(tp
, FLASH
);
11881 tp
->nvram_pagesize
= 256;
11886 tp
->nvram_size
= tr32(NVRAM_ADDR_LOCKOUT
);
11889 case FLASH_5761VENDOR_ATMEL_ADB161D
:
11890 case FLASH_5761VENDOR_ATMEL_MDB161D
:
11891 case FLASH_5761VENDOR_ST_A_M45PE16
:
11892 case FLASH_5761VENDOR_ST_M_M45PE16
:
11893 tp
->nvram_size
= TG3_NVRAM_SIZE_2MB
;
11895 case FLASH_5761VENDOR_ATMEL_ADB081D
:
11896 case FLASH_5761VENDOR_ATMEL_MDB081D
:
11897 case FLASH_5761VENDOR_ST_A_M45PE80
:
11898 case FLASH_5761VENDOR_ST_M_M45PE80
:
11899 tp
->nvram_size
= TG3_NVRAM_SIZE_1MB
;
11901 case FLASH_5761VENDOR_ATMEL_ADB041D
:
11902 case FLASH_5761VENDOR_ATMEL_MDB041D
:
11903 case FLASH_5761VENDOR_ST_A_M45PE40
:
11904 case FLASH_5761VENDOR_ST_M_M45PE40
:
11905 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
11907 case FLASH_5761VENDOR_ATMEL_ADB021D
:
11908 case FLASH_5761VENDOR_ATMEL_MDB021D
:
11909 case FLASH_5761VENDOR_ST_A_M45PE20
:
11910 case FLASH_5761VENDOR_ST_M_M45PE20
:
11911 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
11917 static void __devinit
tg3_get_5906_nvram_info(struct tg3
*tp
)
11919 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
11920 tg3_flag_set(tp
, NVRAM_BUFFERED
);
11921 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
11924 static void __devinit
tg3_get_57780_nvram_info(struct tg3
*tp
)
11928 nvcfg1
= tr32(NVRAM_CFG1
);
11930 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
11931 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ
:
11932 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ
:
11933 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
11934 tg3_flag_set(tp
, NVRAM_BUFFERED
);
11935 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
11937 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
11938 tw32(NVRAM_CFG1
, nvcfg1
);
11940 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
11941 case FLASH_57780VENDOR_ATMEL_AT45DB011D
:
11942 case FLASH_57780VENDOR_ATMEL_AT45DB011B
:
11943 case FLASH_57780VENDOR_ATMEL_AT45DB021D
:
11944 case FLASH_57780VENDOR_ATMEL_AT45DB021B
:
11945 case FLASH_57780VENDOR_ATMEL_AT45DB041D
:
11946 case FLASH_57780VENDOR_ATMEL_AT45DB041B
:
11947 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
11948 tg3_flag_set(tp
, NVRAM_BUFFERED
);
11949 tg3_flag_set(tp
, FLASH
);
11951 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
11952 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
11953 case FLASH_57780VENDOR_ATMEL_AT45DB011D
:
11954 case FLASH_57780VENDOR_ATMEL_AT45DB011B
:
11955 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
11957 case FLASH_57780VENDOR_ATMEL_AT45DB021D
:
11958 case FLASH_57780VENDOR_ATMEL_AT45DB021B
:
11959 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
11961 case FLASH_57780VENDOR_ATMEL_AT45DB041D
:
11962 case FLASH_57780VENDOR_ATMEL_AT45DB041B
:
11963 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
11967 case FLASH_5752VENDOR_ST_M45PE10
:
11968 case FLASH_5752VENDOR_ST_M45PE20
:
11969 case FLASH_5752VENDOR_ST_M45PE40
:
11970 tp
->nvram_jedecnum
= JEDEC_ST
;
11971 tg3_flag_set(tp
, NVRAM_BUFFERED
);
11972 tg3_flag_set(tp
, FLASH
);
11974 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
11975 case FLASH_5752VENDOR_ST_M45PE10
:
11976 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
11978 case FLASH_5752VENDOR_ST_M45PE20
:
11979 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
11981 case FLASH_5752VENDOR_ST_M45PE40
:
11982 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
11987 tg3_flag_set(tp
, NO_NVRAM
);
11991 tg3_nvram_get_pagesize(tp
, nvcfg1
);
11992 if (tp
->nvram_pagesize
!= 264 && tp
->nvram_pagesize
!= 528)
11993 tg3_flag_set(tp
, NO_NVRAM_ADDR_TRANS
);
11997 static void __devinit
tg3_get_5717_nvram_info(struct tg3
*tp
)
12001 nvcfg1
= tr32(NVRAM_CFG1
);
12003 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
12004 case FLASH_5717VENDOR_ATMEL_EEPROM
:
12005 case FLASH_5717VENDOR_MICRO_EEPROM
:
12006 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
12007 tg3_flag_set(tp
, NVRAM_BUFFERED
);
12008 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
12010 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
12011 tw32(NVRAM_CFG1
, nvcfg1
);
12013 case FLASH_5717VENDOR_ATMEL_MDB011D
:
12014 case FLASH_5717VENDOR_ATMEL_ADB011B
:
12015 case FLASH_5717VENDOR_ATMEL_ADB011D
:
12016 case FLASH_5717VENDOR_ATMEL_MDB021D
:
12017 case FLASH_5717VENDOR_ATMEL_ADB021B
:
12018 case FLASH_5717VENDOR_ATMEL_ADB021D
:
12019 case FLASH_5717VENDOR_ATMEL_45USPT
:
12020 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
12021 tg3_flag_set(tp
, NVRAM_BUFFERED
);
12022 tg3_flag_set(tp
, FLASH
);
12024 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
12025 case FLASH_5717VENDOR_ATMEL_MDB021D
:
12026 /* Detect size with tg3_nvram_get_size() */
12028 case FLASH_5717VENDOR_ATMEL_ADB021B
:
12029 case FLASH_5717VENDOR_ATMEL_ADB021D
:
12030 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
12033 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
12037 case FLASH_5717VENDOR_ST_M_M25PE10
:
12038 case FLASH_5717VENDOR_ST_A_M25PE10
:
12039 case FLASH_5717VENDOR_ST_M_M45PE10
:
12040 case FLASH_5717VENDOR_ST_A_M45PE10
:
12041 case FLASH_5717VENDOR_ST_M_M25PE20
:
12042 case FLASH_5717VENDOR_ST_A_M25PE20
:
12043 case FLASH_5717VENDOR_ST_M_M45PE20
:
12044 case FLASH_5717VENDOR_ST_A_M45PE20
:
12045 case FLASH_5717VENDOR_ST_25USPT
:
12046 case FLASH_5717VENDOR_ST_45USPT
:
12047 tp
->nvram_jedecnum
= JEDEC_ST
;
12048 tg3_flag_set(tp
, NVRAM_BUFFERED
);
12049 tg3_flag_set(tp
, FLASH
);
12051 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
12052 case FLASH_5717VENDOR_ST_M_M25PE20
:
12053 case FLASH_5717VENDOR_ST_M_M45PE20
:
12054 /* Detect size with tg3_nvram_get_size() */
12056 case FLASH_5717VENDOR_ST_A_M25PE20
:
12057 case FLASH_5717VENDOR_ST_A_M45PE20
:
12058 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
12061 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
12066 tg3_flag_set(tp
, NO_NVRAM
);
12070 tg3_nvram_get_pagesize(tp
, nvcfg1
);
12071 if (tp
->nvram_pagesize
!= 264 && tp
->nvram_pagesize
!= 528)
12072 tg3_flag_set(tp
, NO_NVRAM_ADDR_TRANS
);
12075 static void __devinit
tg3_get_5720_nvram_info(struct tg3
*tp
)
12077 u32 nvcfg1
, nvmpinstrp
;
12079 nvcfg1
= tr32(NVRAM_CFG1
);
12080 nvmpinstrp
= nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
;
12082 switch (nvmpinstrp
) {
12083 case FLASH_5720_EEPROM_HD
:
12084 case FLASH_5720_EEPROM_LD
:
12085 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
12086 tg3_flag_set(tp
, NVRAM_BUFFERED
);
12088 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
12089 tw32(NVRAM_CFG1
, nvcfg1
);
12090 if (nvmpinstrp
== FLASH_5720_EEPROM_HD
)
12091 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
12093 tp
->nvram_pagesize
= ATMEL_AT24C02_CHIP_SIZE
;
12095 case FLASH_5720VENDOR_M_ATMEL_DB011D
:
12096 case FLASH_5720VENDOR_A_ATMEL_DB011B
:
12097 case FLASH_5720VENDOR_A_ATMEL_DB011D
:
12098 case FLASH_5720VENDOR_M_ATMEL_DB021D
:
12099 case FLASH_5720VENDOR_A_ATMEL_DB021B
:
12100 case FLASH_5720VENDOR_A_ATMEL_DB021D
:
12101 case FLASH_5720VENDOR_M_ATMEL_DB041D
:
12102 case FLASH_5720VENDOR_A_ATMEL_DB041B
:
12103 case FLASH_5720VENDOR_A_ATMEL_DB041D
:
12104 case FLASH_5720VENDOR_M_ATMEL_DB081D
:
12105 case FLASH_5720VENDOR_A_ATMEL_DB081D
:
12106 case FLASH_5720VENDOR_ATMEL_45USPT
:
12107 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
12108 tg3_flag_set(tp
, NVRAM_BUFFERED
);
12109 tg3_flag_set(tp
, FLASH
);
12111 switch (nvmpinstrp
) {
12112 case FLASH_5720VENDOR_M_ATMEL_DB021D
:
12113 case FLASH_5720VENDOR_A_ATMEL_DB021B
:
12114 case FLASH_5720VENDOR_A_ATMEL_DB021D
:
12115 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
12117 case FLASH_5720VENDOR_M_ATMEL_DB041D
:
12118 case FLASH_5720VENDOR_A_ATMEL_DB041B
:
12119 case FLASH_5720VENDOR_A_ATMEL_DB041D
:
12120 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
12122 case FLASH_5720VENDOR_M_ATMEL_DB081D
:
12123 case FLASH_5720VENDOR_A_ATMEL_DB081D
:
12124 tp
->nvram_size
= TG3_NVRAM_SIZE_1MB
;
12127 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
12131 case FLASH_5720VENDOR_M_ST_M25PE10
:
12132 case FLASH_5720VENDOR_M_ST_M45PE10
:
12133 case FLASH_5720VENDOR_A_ST_M25PE10
:
12134 case FLASH_5720VENDOR_A_ST_M45PE10
:
12135 case FLASH_5720VENDOR_M_ST_M25PE20
:
12136 case FLASH_5720VENDOR_M_ST_M45PE20
:
12137 case FLASH_5720VENDOR_A_ST_M25PE20
:
12138 case FLASH_5720VENDOR_A_ST_M45PE20
:
12139 case FLASH_5720VENDOR_M_ST_M25PE40
:
12140 case FLASH_5720VENDOR_M_ST_M45PE40
:
12141 case FLASH_5720VENDOR_A_ST_M25PE40
:
12142 case FLASH_5720VENDOR_A_ST_M45PE40
:
12143 case FLASH_5720VENDOR_M_ST_M25PE80
:
12144 case FLASH_5720VENDOR_M_ST_M45PE80
:
12145 case FLASH_5720VENDOR_A_ST_M25PE80
:
12146 case FLASH_5720VENDOR_A_ST_M45PE80
:
12147 case FLASH_5720VENDOR_ST_25USPT
:
12148 case FLASH_5720VENDOR_ST_45USPT
:
12149 tp
->nvram_jedecnum
= JEDEC_ST
;
12150 tg3_flag_set(tp
, NVRAM_BUFFERED
);
12151 tg3_flag_set(tp
, FLASH
);
12153 switch (nvmpinstrp
) {
12154 case FLASH_5720VENDOR_M_ST_M25PE20
:
12155 case FLASH_5720VENDOR_M_ST_M45PE20
:
12156 case FLASH_5720VENDOR_A_ST_M25PE20
:
12157 case FLASH_5720VENDOR_A_ST_M45PE20
:
12158 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
12160 case FLASH_5720VENDOR_M_ST_M25PE40
:
12161 case FLASH_5720VENDOR_M_ST_M45PE40
:
12162 case FLASH_5720VENDOR_A_ST_M25PE40
:
12163 case FLASH_5720VENDOR_A_ST_M45PE40
:
12164 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
12166 case FLASH_5720VENDOR_M_ST_M25PE80
:
12167 case FLASH_5720VENDOR_M_ST_M45PE80
:
12168 case FLASH_5720VENDOR_A_ST_M25PE80
:
12169 case FLASH_5720VENDOR_A_ST_M45PE80
:
12170 tp
->nvram_size
= TG3_NVRAM_SIZE_1MB
;
12173 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
12178 tg3_flag_set(tp
, NO_NVRAM
);
12182 tg3_nvram_get_pagesize(tp
, nvcfg1
);
12183 if (tp
->nvram_pagesize
!= 264 && tp
->nvram_pagesize
!= 528)
12184 tg3_flag_set(tp
, NO_NVRAM_ADDR_TRANS
);
12187 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12188 static void __devinit
tg3_nvram_init(struct tg3
*tp
)
12190 tw32_f(GRC_EEPROM_ADDR
,
12191 (EEPROM_ADDR_FSM_RESET
|
12192 (EEPROM_DEFAULT_CLOCK_PERIOD
<<
12193 EEPROM_ADDR_CLKPERD_SHIFT
)));
12197 /* Enable seeprom accesses. */
12198 tw32_f(GRC_LOCAL_CTRL
,
12199 tr32(GRC_LOCAL_CTRL
) | GRC_LCLCTRL_AUTO_SEEPROM
);
12202 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
12203 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
) {
12204 tg3_flag_set(tp
, NVRAM
);
12206 if (tg3_nvram_lock(tp
)) {
12207 netdev_warn(tp
->dev
,
12208 "Cannot get nvram lock, %s failed\n",
12212 tg3_enable_nvram_access(tp
);
12214 tp
->nvram_size
= 0;
12216 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
)
12217 tg3_get_5752_nvram_info(tp
);
12218 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
)
12219 tg3_get_5755_nvram_info(tp
);
12220 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
||
12221 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
12222 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
)
12223 tg3_get_5787_nvram_info(tp
);
12224 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
)
12225 tg3_get_5761_nvram_info(tp
);
12226 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
12227 tg3_get_5906_nvram_info(tp
);
12228 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
||
12229 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
)
12230 tg3_get_57780_nvram_info(tp
);
12231 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
||
12232 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
)
12233 tg3_get_5717_nvram_info(tp
);
12234 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
)
12235 tg3_get_5720_nvram_info(tp
);
12237 tg3_get_nvram_info(tp
);
12239 if (tp
->nvram_size
== 0)
12240 tg3_get_nvram_size(tp
);
12242 tg3_disable_nvram_access(tp
);
12243 tg3_nvram_unlock(tp
);
12246 tg3_flag_clear(tp
, NVRAM
);
12247 tg3_flag_clear(tp
, NVRAM_BUFFERED
);
12249 tg3_get_eeprom_size(tp
);
12253 static int tg3_nvram_write_block_using_eeprom(struct tg3
*tp
,
12254 u32 offset
, u32 len
, u8
*buf
)
12259 for (i
= 0; i
< len
; i
+= 4) {
12265 memcpy(&data
, buf
+ i
, 4);
12268 * The SEEPROM interface expects the data to always be opposite
12269 * the native endian format. We accomplish this by reversing
12270 * all the operations that would have been performed on the
12271 * data from a call to tg3_nvram_read_be32().
12273 tw32(GRC_EEPROM_DATA
, swab32(be32_to_cpu(data
)));
12275 val
= tr32(GRC_EEPROM_ADDR
);
12276 tw32(GRC_EEPROM_ADDR
, val
| EEPROM_ADDR_COMPLETE
);
12278 val
&= ~(EEPROM_ADDR_ADDR_MASK
| EEPROM_ADDR_DEVID_MASK
|
12280 tw32(GRC_EEPROM_ADDR
, val
|
12281 (0 << EEPROM_ADDR_DEVID_SHIFT
) |
12282 (addr
& EEPROM_ADDR_ADDR_MASK
) |
12283 EEPROM_ADDR_START
|
12284 EEPROM_ADDR_WRITE
);
12286 for (j
= 0; j
< 1000; j
++) {
12287 val
= tr32(GRC_EEPROM_ADDR
);
12289 if (val
& EEPROM_ADDR_COMPLETE
)
12293 if (!(val
& EEPROM_ADDR_COMPLETE
)) {
12302 /* offset and length are dword aligned */
12303 static int tg3_nvram_write_block_unbuffered(struct tg3
*tp
, u32 offset
, u32 len
,
12307 u32 pagesize
= tp
->nvram_pagesize
;
12308 u32 pagemask
= pagesize
- 1;
12312 tmp
= kmalloc(pagesize
, GFP_KERNEL
);
12318 u32 phy_addr
, page_off
, size
;
12320 phy_addr
= offset
& ~pagemask
;
12322 for (j
= 0; j
< pagesize
; j
+= 4) {
12323 ret
= tg3_nvram_read_be32(tp
, phy_addr
+ j
,
12324 (__be32
*) (tmp
+ j
));
12331 page_off
= offset
& pagemask
;
12338 memcpy(tmp
+ page_off
, buf
, size
);
12340 offset
= offset
+ (pagesize
- page_off
);
12342 tg3_enable_nvram_access(tp
);
12345 * Before we can erase the flash page, we need
12346 * to issue a special "write enable" command.
12348 nvram_cmd
= NVRAM_CMD_WREN
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
12350 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
12353 /* Erase the target page */
12354 tw32(NVRAM_ADDR
, phy_addr
);
12356 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
| NVRAM_CMD_WR
|
12357 NVRAM_CMD_FIRST
| NVRAM_CMD_LAST
| NVRAM_CMD_ERASE
;
12359 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
12362 /* Issue another write enable to start the write. */
12363 nvram_cmd
= NVRAM_CMD_WREN
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
12365 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
12368 for (j
= 0; j
< pagesize
; j
+= 4) {
12371 data
= *((__be32
*) (tmp
+ j
));
12373 tw32(NVRAM_WRDATA
, be32_to_cpu(data
));
12375 tw32(NVRAM_ADDR
, phy_addr
+ j
);
12377 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
|
12381 nvram_cmd
|= NVRAM_CMD_FIRST
;
12382 else if (j
== (pagesize
- 4))
12383 nvram_cmd
|= NVRAM_CMD_LAST
;
12385 if ((ret
= tg3_nvram_exec_cmd(tp
, nvram_cmd
)))
12392 nvram_cmd
= NVRAM_CMD_WRDI
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
12393 tg3_nvram_exec_cmd(tp
, nvram_cmd
);
12400 /* offset and length are dword aligned */
12401 static int tg3_nvram_write_block_buffered(struct tg3
*tp
, u32 offset
, u32 len
,
12406 for (i
= 0; i
< len
; i
+= 4, offset
+= 4) {
12407 u32 page_off
, phy_addr
, nvram_cmd
;
12410 memcpy(&data
, buf
+ i
, 4);
12411 tw32(NVRAM_WRDATA
, be32_to_cpu(data
));
12413 page_off
= offset
% tp
->nvram_pagesize
;
12415 phy_addr
= tg3_nvram_phys_addr(tp
, offset
);
12417 tw32(NVRAM_ADDR
, phy_addr
);
12419 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
| NVRAM_CMD_WR
;
12421 if (page_off
== 0 || i
== 0)
12422 nvram_cmd
|= NVRAM_CMD_FIRST
;
12423 if (page_off
== (tp
->nvram_pagesize
- 4))
12424 nvram_cmd
|= NVRAM_CMD_LAST
;
12426 if (i
== (len
- 4))
12427 nvram_cmd
|= NVRAM_CMD_LAST
;
12429 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5752
&&
12430 !tg3_flag(tp
, 5755_PLUS
) &&
12431 (tp
->nvram_jedecnum
== JEDEC_ST
) &&
12432 (nvram_cmd
& NVRAM_CMD_FIRST
)) {
12434 if ((ret
= tg3_nvram_exec_cmd(tp
,
12435 NVRAM_CMD_WREN
| NVRAM_CMD_GO
|
12440 if (!tg3_flag(tp
, FLASH
)) {
12441 /* We always do complete word writes to eeprom. */
12442 nvram_cmd
|= (NVRAM_CMD_FIRST
| NVRAM_CMD_LAST
);
12445 if ((ret
= tg3_nvram_exec_cmd(tp
, nvram_cmd
)))
12451 /* offset and length are dword aligned */
12452 static int tg3_nvram_write_block(struct tg3
*tp
, u32 offset
, u32 len
, u8
*buf
)
12456 if (tg3_flag(tp
, EEPROM_WRITE_PROT
)) {
12457 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
&
12458 ~GRC_LCLCTRL_GPIO_OUTPUT1
);
12462 if (!tg3_flag(tp
, NVRAM
)) {
12463 ret
= tg3_nvram_write_block_using_eeprom(tp
, offset
, len
, buf
);
12467 ret
= tg3_nvram_lock(tp
);
12471 tg3_enable_nvram_access(tp
);
12472 if (tg3_flag(tp
, 5750_PLUS
) && !tg3_flag(tp
, PROTECTED_NVRAM
))
12473 tw32(NVRAM_WRITE1
, 0x406);
12475 grc_mode
= tr32(GRC_MODE
);
12476 tw32(GRC_MODE
, grc_mode
| GRC_MODE_NVRAM_WR_ENABLE
);
12478 if (tg3_flag(tp
, NVRAM_BUFFERED
) || !tg3_flag(tp
, FLASH
)) {
12479 ret
= tg3_nvram_write_block_buffered(tp
, offset
, len
,
12482 ret
= tg3_nvram_write_block_unbuffered(tp
, offset
, len
,
12486 grc_mode
= tr32(GRC_MODE
);
12487 tw32(GRC_MODE
, grc_mode
& ~GRC_MODE_NVRAM_WR_ENABLE
);
12489 tg3_disable_nvram_access(tp
);
12490 tg3_nvram_unlock(tp
);
12493 if (tg3_flag(tp
, EEPROM_WRITE_PROT
)) {
12494 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
12501 struct subsys_tbl_ent
{
12502 u16 subsys_vendor
, subsys_devid
;
12506 static struct subsys_tbl_ent subsys_id_to_phy_id
[] __devinitdata
= {
12507 /* Broadcom boards. */
12508 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
12509 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6
, TG3_PHY_ID_BCM5401
},
12510 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
12511 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5
, TG3_PHY_ID_BCM5701
},
12512 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
12513 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6
, TG3_PHY_ID_BCM8002
},
12514 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
12515 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9
, 0 },
12516 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
12517 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1
, TG3_PHY_ID_BCM5701
},
12518 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
12519 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8
, TG3_PHY_ID_BCM5701
},
12520 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
12521 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7
, 0 },
12522 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
12523 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10
, TG3_PHY_ID_BCM5701
},
12524 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
12525 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12
, TG3_PHY_ID_BCM5701
},
12526 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
12527 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1
, TG3_PHY_ID_BCM5703
},
12528 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
12529 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2
, TG3_PHY_ID_BCM5703
},
12532 { TG3PCI_SUBVENDOR_ID_3COM
,
12533 TG3PCI_SUBDEVICE_ID_3COM_3C996T
, TG3_PHY_ID_BCM5401
},
12534 { TG3PCI_SUBVENDOR_ID_3COM
,
12535 TG3PCI_SUBDEVICE_ID_3COM_3C996BT
, TG3_PHY_ID_BCM5701
},
12536 { TG3PCI_SUBVENDOR_ID_3COM
,
12537 TG3PCI_SUBDEVICE_ID_3COM_3C996SX
, 0 },
12538 { TG3PCI_SUBVENDOR_ID_3COM
,
12539 TG3PCI_SUBDEVICE_ID_3COM_3C1000T
, TG3_PHY_ID_BCM5701
},
12540 { TG3PCI_SUBVENDOR_ID_3COM
,
12541 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01
, TG3_PHY_ID_BCM5701
},
12544 { TG3PCI_SUBVENDOR_ID_DELL
,
12545 TG3PCI_SUBDEVICE_ID_DELL_VIPER
, TG3_PHY_ID_BCM5401
},
12546 { TG3PCI_SUBVENDOR_ID_DELL
,
12547 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR
, TG3_PHY_ID_BCM5401
},
12548 { TG3PCI_SUBVENDOR_ID_DELL
,
12549 TG3PCI_SUBDEVICE_ID_DELL_MERLOT
, TG3_PHY_ID_BCM5411
},
12550 { TG3PCI_SUBVENDOR_ID_DELL
,
12551 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT
, TG3_PHY_ID_BCM5411
},
12553 /* Compaq boards. */
12554 { TG3PCI_SUBVENDOR_ID_COMPAQ
,
12555 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE
, TG3_PHY_ID_BCM5701
},
12556 { TG3PCI_SUBVENDOR_ID_COMPAQ
,
12557 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2
, TG3_PHY_ID_BCM5701
},
12558 { TG3PCI_SUBVENDOR_ID_COMPAQ
,
12559 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING
, 0 },
12560 { TG3PCI_SUBVENDOR_ID_COMPAQ
,
12561 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780
, TG3_PHY_ID_BCM5701
},
12562 { TG3PCI_SUBVENDOR_ID_COMPAQ
,
12563 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2
, TG3_PHY_ID_BCM5701
},
12566 { TG3PCI_SUBVENDOR_ID_IBM
,
12567 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2
, 0 }
12570 static struct subsys_tbl_ent
* __devinit
tg3_lookup_by_subsys(struct tg3
*tp
)
12574 for (i
= 0; i
< ARRAY_SIZE(subsys_id_to_phy_id
); i
++) {
12575 if ((subsys_id_to_phy_id
[i
].subsys_vendor
==
12576 tp
->pdev
->subsystem_vendor
) &&
12577 (subsys_id_to_phy_id
[i
].subsys_devid
==
12578 tp
->pdev
->subsystem_device
))
12579 return &subsys_id_to_phy_id
[i
];
12584 static void __devinit
tg3_get_eeprom_hw_cfg(struct tg3
*tp
)
12589 /* On some early chips the SRAM cannot be accessed in D3hot state,
12590 * so need make sure we're in D0.
12592 pci_read_config_word(tp
->pdev
, tp
->pm_cap
+ PCI_PM_CTRL
, &pmcsr
);
12593 pmcsr
&= ~PCI_PM_CTRL_STATE_MASK
;
12594 pci_write_config_word(tp
->pdev
, tp
->pm_cap
+ PCI_PM_CTRL
, pmcsr
);
12597 /* Make sure register accesses (indirect or otherwise)
12598 * will function correctly.
12600 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
12601 tp
->misc_host_ctrl
);
12603 /* The memory arbiter has to be enabled in order for SRAM accesses
12604 * to succeed. Normally on powerup the tg3 chip firmware will make
12605 * sure it is enabled, but other entities such as system netboot
12606 * code might disable it.
12608 val
= tr32(MEMARB_MODE
);
12609 tw32(MEMARB_MODE
, val
| MEMARB_MODE_ENABLE
);
12611 tp
->phy_id
= TG3_PHY_ID_INVALID
;
12612 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
12614 /* Assume an onboard device and WOL capable by default. */
12615 tg3_flag_set(tp
, EEPROM_WRITE_PROT
);
12616 tg3_flag_set(tp
, WOL_CAP
);
12618 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
12619 if (!(tr32(PCIE_TRANSACTION_CFG
) & PCIE_TRANS_CFG_LOM
)) {
12620 tg3_flag_clear(tp
, EEPROM_WRITE_PROT
);
12621 tg3_flag_set(tp
, IS_NIC
);
12623 val
= tr32(VCPU_CFGSHDW
);
12624 if (val
& VCPU_CFGSHDW_ASPM_DBNC
)
12625 tg3_flag_set(tp
, ASPM_WORKAROUND
);
12626 if ((val
& VCPU_CFGSHDW_WOL_ENABLE
) &&
12627 (val
& VCPU_CFGSHDW_WOL_MAGPKT
)) {
12628 tg3_flag_set(tp
, WOL_ENABLE
);
12629 device_set_wakeup_enable(&tp
->pdev
->dev
, true);
12634 tg3_read_mem(tp
, NIC_SRAM_DATA_SIG
, &val
);
12635 if (val
== NIC_SRAM_DATA_SIG_MAGIC
) {
12636 u32 nic_cfg
, led_cfg
;
12637 u32 nic_phy_id
, ver
, cfg2
= 0, cfg4
= 0, eeprom_phy_id
;
12638 int eeprom_phy_serdes
= 0;
12640 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG
, &nic_cfg
);
12641 tp
->nic_sram_data_cfg
= nic_cfg
;
12643 tg3_read_mem(tp
, NIC_SRAM_DATA_VER
, &ver
);
12644 ver
>>= NIC_SRAM_DATA_VER_SHIFT
;
12645 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
12646 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
&&
12647 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5703
&&
12648 (ver
> 0) && (ver
< 0x100))
12649 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_2
, &cfg2
);
12651 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
)
12652 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_4
, &cfg4
);
12654 if ((nic_cfg
& NIC_SRAM_DATA_CFG_PHY_TYPE_MASK
) ==
12655 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER
)
12656 eeprom_phy_serdes
= 1;
12658 tg3_read_mem(tp
, NIC_SRAM_DATA_PHY_ID
, &nic_phy_id
);
12659 if (nic_phy_id
!= 0) {
12660 u32 id1
= nic_phy_id
& NIC_SRAM_DATA_PHY_ID1_MASK
;
12661 u32 id2
= nic_phy_id
& NIC_SRAM_DATA_PHY_ID2_MASK
;
12663 eeprom_phy_id
= (id1
>> 16) << 10;
12664 eeprom_phy_id
|= (id2
& 0xfc00) << 16;
12665 eeprom_phy_id
|= (id2
& 0x03ff) << 0;
12669 tp
->phy_id
= eeprom_phy_id
;
12670 if (eeprom_phy_serdes
) {
12671 if (!tg3_flag(tp
, 5705_PLUS
))
12672 tp
->phy_flags
|= TG3_PHYFLG_PHY_SERDES
;
12674 tp
->phy_flags
|= TG3_PHYFLG_MII_SERDES
;
12677 if (tg3_flag(tp
, 5750_PLUS
))
12678 led_cfg
= cfg2
& (NIC_SRAM_DATA_CFG_LED_MODE_MASK
|
12679 SHASTA_EXT_LED_MODE_MASK
);
12681 led_cfg
= nic_cfg
& NIC_SRAM_DATA_CFG_LED_MODE_MASK
;
12685 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1
:
12686 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
12689 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2
:
12690 tp
->led_ctrl
= LED_CTRL_MODE_PHY_2
;
12693 case NIC_SRAM_DATA_CFG_LED_MODE_MAC
:
12694 tp
->led_ctrl
= LED_CTRL_MODE_MAC
;
12696 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12697 * read on some older 5700/5701 bootcode.
12699 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
12701 GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
12703 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
12707 case SHASTA_EXT_LED_SHARED
:
12708 tp
->led_ctrl
= LED_CTRL_MODE_SHARED
;
12709 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
&&
12710 tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A1
)
12711 tp
->led_ctrl
|= (LED_CTRL_MODE_PHY_1
|
12712 LED_CTRL_MODE_PHY_2
);
12715 case SHASTA_EXT_LED_MAC
:
12716 tp
->led_ctrl
= LED_CTRL_MODE_SHASTA_MAC
;
12719 case SHASTA_EXT_LED_COMBO
:
12720 tp
->led_ctrl
= LED_CTRL_MODE_COMBO
;
12721 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
)
12722 tp
->led_ctrl
|= (LED_CTRL_MODE_PHY_1
|
12723 LED_CTRL_MODE_PHY_2
);
12728 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
12729 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) &&
12730 tp
->pdev
->subsystem_vendor
== PCI_VENDOR_ID_DELL
)
12731 tp
->led_ctrl
= LED_CTRL_MODE_PHY_2
;
12733 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5784_AX
)
12734 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
12736 if (nic_cfg
& NIC_SRAM_DATA_CFG_EEPROM_WP
) {
12737 tg3_flag_set(tp
, EEPROM_WRITE_PROT
);
12738 if ((tp
->pdev
->subsystem_vendor
==
12739 PCI_VENDOR_ID_ARIMA
) &&
12740 (tp
->pdev
->subsystem_device
== 0x205a ||
12741 tp
->pdev
->subsystem_device
== 0x2063))
12742 tg3_flag_clear(tp
, EEPROM_WRITE_PROT
);
12744 tg3_flag_clear(tp
, EEPROM_WRITE_PROT
);
12745 tg3_flag_set(tp
, IS_NIC
);
12748 if (nic_cfg
& NIC_SRAM_DATA_CFG_ASF_ENABLE
) {
12749 tg3_flag_set(tp
, ENABLE_ASF
);
12750 if (tg3_flag(tp
, 5750_PLUS
))
12751 tg3_flag_set(tp
, ASF_NEW_HANDSHAKE
);
12754 if ((nic_cfg
& NIC_SRAM_DATA_CFG_APE_ENABLE
) &&
12755 tg3_flag(tp
, 5750_PLUS
))
12756 tg3_flag_set(tp
, ENABLE_APE
);
12758 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
&&
12759 !(nic_cfg
& NIC_SRAM_DATA_CFG_FIBER_WOL
))
12760 tg3_flag_clear(tp
, WOL_CAP
);
12762 if (tg3_flag(tp
, WOL_CAP
) &&
12763 (nic_cfg
& NIC_SRAM_DATA_CFG_WOL_ENABLE
)) {
12764 tg3_flag_set(tp
, WOL_ENABLE
);
12765 device_set_wakeup_enable(&tp
->pdev
->dev
, true);
12768 if (cfg2
& (1 << 17))
12769 tp
->phy_flags
|= TG3_PHYFLG_CAPACITIVE_COUPLING
;
12771 /* serdes signal pre-emphasis in register 0x590 set by */
12772 /* bootcode if bit 18 is set */
12773 if (cfg2
& (1 << 18))
12774 tp
->phy_flags
|= TG3_PHYFLG_SERDES_PREEMPHASIS
;
12776 if ((tg3_flag(tp
, 57765_PLUS
) ||
12777 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
&&
12778 GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5784_AX
)) &&
12779 (cfg2
& NIC_SRAM_DATA_CFG_2_APD_EN
))
12780 tp
->phy_flags
|= TG3_PHYFLG_ENABLE_APD
;
12782 if (tg3_flag(tp
, PCI_EXPRESS
) &&
12783 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5785
&&
12784 !tg3_flag(tp
, 57765_PLUS
)) {
12787 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_3
, &cfg3
);
12788 if (cfg3
& NIC_SRAM_ASPM_DEBOUNCE
)
12789 tg3_flag_set(tp
, ASPM_WORKAROUND
);
12792 if (cfg4
& NIC_SRAM_RGMII_INBAND_DISABLE
)
12793 tg3_flag_set(tp
, RGMII_INBAND_DISABLE
);
12794 if (cfg4
& NIC_SRAM_RGMII_EXT_IBND_RX_EN
)
12795 tg3_flag_set(tp
, RGMII_EXT_IBND_RX_EN
);
12796 if (cfg4
& NIC_SRAM_RGMII_EXT_IBND_TX_EN
)
12797 tg3_flag_set(tp
, RGMII_EXT_IBND_TX_EN
);
12800 if (tg3_flag(tp
, WOL_CAP
))
12801 device_set_wakeup_enable(&tp
->pdev
->dev
,
12802 tg3_flag(tp
, WOL_ENABLE
));
12804 device_set_wakeup_capable(&tp
->pdev
->dev
, false);
12807 static int __devinit
tg3_issue_otp_command(struct tg3
*tp
, u32 cmd
)
12812 tw32(OTP_CTRL
, cmd
| OTP_CTRL_OTP_CMD_START
);
12813 tw32(OTP_CTRL
, cmd
);
12815 /* Wait for up to 1 ms for command to execute. */
12816 for (i
= 0; i
< 100; i
++) {
12817 val
= tr32(OTP_STATUS
);
12818 if (val
& OTP_STATUS_CMD_DONE
)
12823 return (val
& OTP_STATUS_CMD_DONE
) ? 0 : -EBUSY
;
12826 /* Read the gphy configuration from the OTP region of the chip. The gphy
12827 * configuration is a 32-bit value that straddles the alignment boundary.
12828 * We do two 32-bit reads and then shift and merge the results.
12830 static u32 __devinit
tg3_read_otp_phycfg(struct tg3
*tp
)
12832 u32 bhalf_otp
, thalf_otp
;
12834 tw32(OTP_MODE
, OTP_MODE_OTP_THRU_GRC
);
12836 if (tg3_issue_otp_command(tp
, OTP_CTRL_OTP_CMD_INIT
))
12839 tw32(OTP_ADDRESS
, OTP_ADDRESS_MAGIC1
);
12841 if (tg3_issue_otp_command(tp
, OTP_CTRL_OTP_CMD_READ
))
12844 thalf_otp
= tr32(OTP_READ_DATA
);
12846 tw32(OTP_ADDRESS
, OTP_ADDRESS_MAGIC2
);
12848 if (tg3_issue_otp_command(tp
, OTP_CTRL_OTP_CMD_READ
))
12851 bhalf_otp
= tr32(OTP_READ_DATA
);
12853 return ((thalf_otp
& 0x0000ffff) << 16) | (bhalf_otp
>> 16);
12856 static void __devinit
tg3_phy_init_link_config(struct tg3
*tp
)
12858 u32 adv
= ADVERTISED_Autoneg
|
12861 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
))
12862 adv
|= ADVERTISED_1000baseT_Half
|
12863 ADVERTISED_1000baseT_Full
;
12865 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
))
12866 adv
|= ADVERTISED_100baseT_Half
|
12867 ADVERTISED_100baseT_Full
|
12868 ADVERTISED_10baseT_Half
|
12869 ADVERTISED_10baseT_Full
|
12872 adv
|= ADVERTISED_FIBRE
;
12874 tp
->link_config
.advertising
= adv
;
12875 tp
->link_config
.speed
= SPEED_INVALID
;
12876 tp
->link_config
.duplex
= DUPLEX_INVALID
;
12877 tp
->link_config
.autoneg
= AUTONEG_ENABLE
;
12878 tp
->link_config
.active_speed
= SPEED_INVALID
;
12879 tp
->link_config
.active_duplex
= DUPLEX_INVALID
;
12880 tp
->link_config
.orig_speed
= SPEED_INVALID
;
12881 tp
->link_config
.orig_duplex
= DUPLEX_INVALID
;
12882 tp
->link_config
.orig_autoneg
= AUTONEG_INVALID
;
12885 static int __devinit
tg3_phy_probe(struct tg3
*tp
)
12887 u32 hw_phy_id_1
, hw_phy_id_2
;
12888 u32 hw_phy_id
, hw_phy_id_masked
;
12891 /* flow control autonegotiation is default behavior */
12892 tg3_flag_set(tp
, PAUSE_AUTONEG
);
12893 tp
->link_config
.flowctrl
= FLOW_CTRL_TX
| FLOW_CTRL_RX
;
12895 if (tg3_flag(tp
, USE_PHYLIB
))
12896 return tg3_phy_init(tp
);
12898 /* Reading the PHY ID register can conflict with ASF
12899 * firmware access to the PHY hardware.
12902 if (tg3_flag(tp
, ENABLE_ASF
) || tg3_flag(tp
, ENABLE_APE
)) {
12903 hw_phy_id
= hw_phy_id_masked
= TG3_PHY_ID_INVALID
;
12905 /* Now read the physical PHY_ID from the chip and verify
12906 * that it is sane. If it doesn't look good, we fall back
12907 * to either the hard-coded table based PHY_ID and failing
12908 * that the value found in the eeprom area.
12910 err
|= tg3_readphy(tp
, MII_PHYSID1
, &hw_phy_id_1
);
12911 err
|= tg3_readphy(tp
, MII_PHYSID2
, &hw_phy_id_2
);
12913 hw_phy_id
= (hw_phy_id_1
& 0xffff) << 10;
12914 hw_phy_id
|= (hw_phy_id_2
& 0xfc00) << 16;
12915 hw_phy_id
|= (hw_phy_id_2
& 0x03ff) << 0;
12917 hw_phy_id_masked
= hw_phy_id
& TG3_PHY_ID_MASK
;
12920 if (!err
&& TG3_KNOWN_PHY_ID(hw_phy_id_masked
)) {
12921 tp
->phy_id
= hw_phy_id
;
12922 if (hw_phy_id_masked
== TG3_PHY_ID_BCM8002
)
12923 tp
->phy_flags
|= TG3_PHYFLG_PHY_SERDES
;
12925 tp
->phy_flags
&= ~TG3_PHYFLG_PHY_SERDES
;
12927 if (tp
->phy_id
!= TG3_PHY_ID_INVALID
) {
12928 /* Do nothing, phy ID already set up in
12929 * tg3_get_eeprom_hw_cfg().
12932 struct subsys_tbl_ent
*p
;
12934 /* No eeprom signature? Try the hardcoded
12935 * subsys device table.
12937 p
= tg3_lookup_by_subsys(tp
);
12941 tp
->phy_id
= p
->phy_id
;
12943 tp
->phy_id
== TG3_PHY_ID_BCM8002
)
12944 tp
->phy_flags
|= TG3_PHYFLG_PHY_SERDES
;
12948 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
) &&
12949 ((tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5718
&&
12950 tp
->pci_chip_rev_id
!= CHIPREV_ID_5717_A0
) ||
12951 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
&&
12952 tp
->pci_chip_rev_id
!= CHIPREV_ID_57765_A0
)))
12953 tp
->phy_flags
|= TG3_PHYFLG_EEE_CAP
;
12955 tg3_phy_init_link_config(tp
);
12957 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
) &&
12958 !tg3_flag(tp
, ENABLE_APE
) &&
12959 !tg3_flag(tp
, ENABLE_ASF
)) {
12962 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
12963 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
12964 (bmsr
& BMSR_LSTATUS
))
12965 goto skip_phy_reset
;
12967 err
= tg3_phy_reset(tp
);
12971 tg3_phy_set_wirespeed(tp
);
12973 mask
= (ADVERTISED_10baseT_Half
| ADVERTISED_10baseT_Full
|
12974 ADVERTISED_100baseT_Half
| ADVERTISED_100baseT_Full
|
12975 ADVERTISED_1000baseT_Half
| ADVERTISED_1000baseT_Full
);
12976 if (!tg3_copper_is_advertising_all(tp
, mask
)) {
12977 tg3_phy_autoneg_cfg(tp
, tp
->link_config
.advertising
,
12978 tp
->link_config
.flowctrl
);
12980 tg3_writephy(tp
, MII_BMCR
,
12981 BMCR_ANENABLE
| BMCR_ANRESTART
);
12986 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5401
) {
12987 err
= tg3_init_5401phy_dsp(tp
);
12991 err
= tg3_init_5401phy_dsp(tp
);
12997 static void __devinit
tg3_read_vpd(struct tg3
*tp
)
13000 unsigned int block_end
, rosize
, len
;
13003 vpd_data
= (u8
*)tg3_vpd_readblock(tp
);
13007 i
= pci_vpd_find_tag(vpd_data
, 0, TG3_NVM_VPD_LEN
,
13008 PCI_VPD_LRDT_RO_DATA
);
13010 goto out_not_found
;
13012 rosize
= pci_vpd_lrdt_size(&vpd_data
[i
]);
13013 block_end
= i
+ PCI_VPD_LRDT_TAG_SIZE
+ rosize
;
13014 i
+= PCI_VPD_LRDT_TAG_SIZE
;
13016 if (block_end
> TG3_NVM_VPD_LEN
)
13017 goto out_not_found
;
13019 j
= pci_vpd_find_info_keyword(vpd_data
, i
, rosize
,
13020 PCI_VPD_RO_KEYWORD_MFR_ID
);
13022 len
= pci_vpd_info_field_size(&vpd_data
[j
]);
13024 j
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
13025 if (j
+ len
> block_end
|| len
!= 4 ||
13026 memcmp(&vpd_data
[j
], "1028", 4))
13029 j
= pci_vpd_find_info_keyword(vpd_data
, i
, rosize
,
13030 PCI_VPD_RO_KEYWORD_VENDOR0
);
13034 len
= pci_vpd_info_field_size(&vpd_data
[j
]);
13036 j
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
13037 if (j
+ len
> block_end
)
13040 memcpy(tp
->fw_ver
, &vpd_data
[j
], len
);
13041 strncat(tp
->fw_ver
, " bc ", TG3_NVM_VPD_LEN
- len
- 1);
13045 i
= pci_vpd_find_info_keyword(vpd_data
, i
, rosize
,
13046 PCI_VPD_RO_KEYWORD_PARTNO
);
13048 goto out_not_found
;
13050 len
= pci_vpd_info_field_size(&vpd_data
[i
]);
13052 i
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
13053 if (len
> TG3_BPN_SIZE
||
13054 (len
+ i
) > TG3_NVM_VPD_LEN
)
13055 goto out_not_found
;
13057 memcpy(tp
->board_part_number
, &vpd_data
[i
], len
);
13061 if (tp
->board_part_number
[0])
13065 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
) {
13066 if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5717
)
13067 strcpy(tp
->board_part_number
, "BCM5717");
13068 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5718
)
13069 strcpy(tp
->board_part_number
, "BCM5718");
13072 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
) {
13073 if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57780
)
13074 strcpy(tp
->board_part_number
, "BCM57780");
13075 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57760
)
13076 strcpy(tp
->board_part_number
, "BCM57760");
13077 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57790
)
13078 strcpy(tp
->board_part_number
, "BCM57790");
13079 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57788
)
13080 strcpy(tp
->board_part_number
, "BCM57788");
13083 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
) {
13084 if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57761
)
13085 strcpy(tp
->board_part_number
, "BCM57761");
13086 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57765
)
13087 strcpy(tp
->board_part_number
, "BCM57765");
13088 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57781
)
13089 strcpy(tp
->board_part_number
, "BCM57781");
13090 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57785
)
13091 strcpy(tp
->board_part_number
, "BCM57785");
13092 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57791
)
13093 strcpy(tp
->board_part_number
, "BCM57791");
13094 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57795
)
13095 strcpy(tp
->board_part_number
, "BCM57795");
13098 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
13099 strcpy(tp
->board_part_number
, "BCM95906");
13102 strcpy(tp
->board_part_number
, "none");
13106 static int __devinit
tg3_fw_img_is_valid(struct tg3
*tp
, u32 offset
)
13110 if (tg3_nvram_read(tp
, offset
, &val
) ||
13111 (val
& 0xfc000000) != 0x0c000000 ||
13112 tg3_nvram_read(tp
, offset
+ 4, &val
) ||
13119 static void __devinit
tg3_read_bc_ver(struct tg3
*tp
)
13121 u32 val
, offset
, start
, ver_offset
;
13123 bool newver
= false;
13125 if (tg3_nvram_read(tp
, 0xc, &offset
) ||
13126 tg3_nvram_read(tp
, 0x4, &start
))
13129 offset
= tg3_nvram_logical_addr(tp
, offset
);
13131 if (tg3_nvram_read(tp
, offset
, &val
))
13134 if ((val
& 0xfc000000) == 0x0c000000) {
13135 if (tg3_nvram_read(tp
, offset
+ 4, &val
))
13142 dst_off
= strlen(tp
->fw_ver
);
13145 if (TG3_VER_SIZE
- dst_off
< 16 ||
13146 tg3_nvram_read(tp
, offset
+ 8, &ver_offset
))
13149 offset
= offset
+ ver_offset
- start
;
13150 for (i
= 0; i
< 16; i
+= 4) {
13152 if (tg3_nvram_read_be32(tp
, offset
+ i
, &v
))
13155 memcpy(tp
->fw_ver
+ dst_off
+ i
, &v
, sizeof(v
));
13160 if (tg3_nvram_read(tp
, TG3_NVM_PTREV_BCVER
, &ver_offset
))
13163 major
= (ver_offset
& TG3_NVM_BCVER_MAJMSK
) >>
13164 TG3_NVM_BCVER_MAJSFT
;
13165 minor
= ver_offset
& TG3_NVM_BCVER_MINMSK
;
13166 snprintf(&tp
->fw_ver
[dst_off
], TG3_VER_SIZE
- dst_off
,
13167 "v%d.%02d", major
, minor
);
13171 static void __devinit
tg3_read_hwsb_ver(struct tg3
*tp
)
13173 u32 val
, major
, minor
;
13175 /* Use native endian representation */
13176 if (tg3_nvram_read(tp
, TG3_NVM_HWSB_CFG1
, &val
))
13179 major
= (val
& TG3_NVM_HWSB_CFG1_MAJMSK
) >>
13180 TG3_NVM_HWSB_CFG1_MAJSFT
;
13181 minor
= (val
& TG3_NVM_HWSB_CFG1_MINMSK
) >>
13182 TG3_NVM_HWSB_CFG1_MINSFT
;
13184 snprintf(&tp
->fw_ver
[0], 32, "sb v%d.%02d", major
, minor
);
13187 static void __devinit
tg3_read_sb_ver(struct tg3
*tp
, u32 val
)
13189 u32 offset
, major
, minor
, build
;
13191 strncat(tp
->fw_ver
, "sb", TG3_VER_SIZE
- strlen(tp
->fw_ver
) - 1);
13193 if ((val
& TG3_EEPROM_SB_FORMAT_MASK
) != TG3_EEPROM_SB_FORMAT_1
)
13196 switch (val
& TG3_EEPROM_SB_REVISION_MASK
) {
13197 case TG3_EEPROM_SB_REVISION_0
:
13198 offset
= TG3_EEPROM_SB_F1R0_EDH_OFF
;
13200 case TG3_EEPROM_SB_REVISION_2
:
13201 offset
= TG3_EEPROM_SB_F1R2_EDH_OFF
;
13203 case TG3_EEPROM_SB_REVISION_3
:
13204 offset
= TG3_EEPROM_SB_F1R3_EDH_OFF
;
13206 case TG3_EEPROM_SB_REVISION_4
:
13207 offset
= TG3_EEPROM_SB_F1R4_EDH_OFF
;
13209 case TG3_EEPROM_SB_REVISION_5
:
13210 offset
= TG3_EEPROM_SB_F1R5_EDH_OFF
;
13212 case TG3_EEPROM_SB_REVISION_6
:
13213 offset
= TG3_EEPROM_SB_F1R6_EDH_OFF
;
13219 if (tg3_nvram_read(tp
, offset
, &val
))
13222 build
= (val
& TG3_EEPROM_SB_EDH_BLD_MASK
) >>
13223 TG3_EEPROM_SB_EDH_BLD_SHFT
;
13224 major
= (val
& TG3_EEPROM_SB_EDH_MAJ_MASK
) >>
13225 TG3_EEPROM_SB_EDH_MAJ_SHFT
;
13226 minor
= val
& TG3_EEPROM_SB_EDH_MIN_MASK
;
13228 if (minor
> 99 || build
> 26)
13231 offset
= strlen(tp
->fw_ver
);
13232 snprintf(&tp
->fw_ver
[offset
], TG3_VER_SIZE
- offset
,
13233 " v%d.%02d", major
, minor
);
13236 offset
= strlen(tp
->fw_ver
);
13237 if (offset
< TG3_VER_SIZE
- 1)
13238 tp
->fw_ver
[offset
] = 'a' + build
- 1;
13242 static void __devinit
tg3_read_mgmtfw_ver(struct tg3
*tp
)
13244 u32 val
, offset
, start
;
13247 for (offset
= TG3_NVM_DIR_START
;
13248 offset
< TG3_NVM_DIR_END
;
13249 offset
+= TG3_NVM_DIRENT_SIZE
) {
13250 if (tg3_nvram_read(tp
, offset
, &val
))
13253 if ((val
>> TG3_NVM_DIRTYPE_SHIFT
) == TG3_NVM_DIRTYPE_ASFINI
)
13257 if (offset
== TG3_NVM_DIR_END
)
13260 if (!tg3_flag(tp
, 5705_PLUS
))
13261 start
= 0x08000000;
13262 else if (tg3_nvram_read(tp
, offset
- 4, &start
))
13265 if (tg3_nvram_read(tp
, offset
+ 4, &offset
) ||
13266 !tg3_fw_img_is_valid(tp
, offset
) ||
13267 tg3_nvram_read(tp
, offset
+ 8, &val
))
13270 offset
+= val
- start
;
13272 vlen
= strlen(tp
->fw_ver
);
13274 tp
->fw_ver
[vlen
++] = ',';
13275 tp
->fw_ver
[vlen
++] = ' ';
13277 for (i
= 0; i
< 4; i
++) {
13279 if (tg3_nvram_read_be32(tp
, offset
, &v
))
13282 offset
+= sizeof(v
);
13284 if (vlen
> TG3_VER_SIZE
- sizeof(v
)) {
13285 memcpy(&tp
->fw_ver
[vlen
], &v
, TG3_VER_SIZE
- vlen
);
13289 memcpy(&tp
->fw_ver
[vlen
], &v
, sizeof(v
));
13294 static void __devinit
tg3_read_dash_ver(struct tg3
*tp
)
13300 if (!tg3_flag(tp
, ENABLE_APE
) || !tg3_flag(tp
, ENABLE_ASF
))
13303 apedata
= tg3_ape_read32(tp
, TG3_APE_SEG_SIG
);
13304 if (apedata
!= APE_SEG_SIG_MAGIC
)
13307 apedata
= tg3_ape_read32(tp
, TG3_APE_FW_STATUS
);
13308 if (!(apedata
& APE_FW_STATUS_READY
))
13311 apedata
= tg3_ape_read32(tp
, TG3_APE_FW_VERSION
);
13313 if (tg3_ape_read32(tp
, TG3_APE_FW_FEATURES
) & TG3_APE_FW_FEATURE_NCSI
) {
13314 tg3_flag_set(tp
, APE_HAS_NCSI
);
13320 vlen
= strlen(tp
->fw_ver
);
13322 snprintf(&tp
->fw_ver
[vlen
], TG3_VER_SIZE
- vlen
, " %s v%d.%d.%d.%d",
13324 (apedata
& APE_FW_VERSION_MAJMSK
) >> APE_FW_VERSION_MAJSFT
,
13325 (apedata
& APE_FW_VERSION_MINMSK
) >> APE_FW_VERSION_MINSFT
,
13326 (apedata
& APE_FW_VERSION_REVMSK
) >> APE_FW_VERSION_REVSFT
,
13327 (apedata
& APE_FW_VERSION_BLDMSK
));
13330 static void __devinit
tg3_read_fw_ver(struct tg3
*tp
)
13333 bool vpd_vers
= false;
13335 if (tp
->fw_ver
[0] != 0)
13338 if (tg3_flag(tp
, NO_NVRAM
)) {
13339 strcat(tp
->fw_ver
, "sb");
13343 if (tg3_nvram_read(tp
, 0, &val
))
13346 if (val
== TG3_EEPROM_MAGIC
)
13347 tg3_read_bc_ver(tp
);
13348 else if ((val
& TG3_EEPROM_MAGIC_FW_MSK
) == TG3_EEPROM_MAGIC_FW
)
13349 tg3_read_sb_ver(tp
, val
);
13350 else if ((val
& TG3_EEPROM_MAGIC_HW_MSK
) == TG3_EEPROM_MAGIC_HW
)
13351 tg3_read_hwsb_ver(tp
);
13355 if (!tg3_flag(tp
, ENABLE_ASF
) || tg3_flag(tp
, ENABLE_APE
) || vpd_vers
)
13358 tg3_read_mgmtfw_ver(tp
);
13361 tp
->fw_ver
[TG3_VER_SIZE
- 1] = 0;
13364 static struct pci_dev
* __devinit
tg3_find_peer(struct tg3
*);
13366 static inline u32
tg3_rx_ret_ring_size(struct tg3
*tp
)
13368 if (tg3_flag(tp
, LRG_PROD_RING_CAP
))
13369 return TG3_RX_RET_MAX_SIZE_5717
;
13370 else if (tg3_flag(tp
, JUMBO_CAPABLE
) && !tg3_flag(tp
, 5780_CLASS
))
13371 return TG3_RX_RET_MAX_SIZE_5700
;
13373 return TG3_RX_RET_MAX_SIZE_5705
;
13376 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets
) = {
13377 { PCI_DEVICE(PCI_VENDOR_ID_AMD
, PCI_DEVICE_ID_AMD_FE_GATE_700C
) },
13378 { PCI_DEVICE(PCI_VENDOR_ID_AMD
, PCI_DEVICE_ID_AMD_8131_BRIDGE
) },
13379 { PCI_DEVICE(PCI_VENDOR_ID_VIA
, PCI_DEVICE_ID_VIA_8385_0
) },
13383 static int __devinit
tg3_get_invariants(struct tg3
*tp
)
13386 u32 pci_state_reg
, grc_misc_cfg
;
13391 /* Force memory write invalidate off. If we leave it on,
13392 * then on 5700_BX chips we have to enable a workaround.
13393 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13394 * to match the cacheline size. The Broadcom driver have this
13395 * workaround but turns MWI off all the times so never uses
13396 * it. This seems to suggest that the workaround is insufficient.
13398 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
13399 pci_cmd
&= ~PCI_COMMAND_INVALIDATE
;
13400 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
13402 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
13403 * has the register indirect write enable bit set before
13404 * we try to access any of the MMIO registers. It is also
13405 * critical that the PCI-X hw workaround situation is decided
13406 * before that as well.
13408 pci_read_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
13411 tp
->pci_chip_rev_id
= (misc_ctrl_reg
>>
13412 MISC_HOST_CTRL_CHIPREV_SHIFT
);
13413 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_USE_PROD_ID_REG
) {
13414 u32 prod_id_asic_rev
;
13416 if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5717
||
13417 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5718
||
13418 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5719
||
13419 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5720
)
13420 pci_read_config_dword(tp
->pdev
,
13421 TG3PCI_GEN2_PRODID_ASICREV
,
13422 &prod_id_asic_rev
);
13423 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57781
||
13424 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57785
||
13425 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57761
||
13426 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57765
||
13427 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57791
||
13428 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57795
)
13429 pci_read_config_dword(tp
->pdev
,
13430 TG3PCI_GEN15_PRODID_ASICREV
,
13431 &prod_id_asic_rev
);
13433 pci_read_config_dword(tp
->pdev
, TG3PCI_PRODID_ASICREV
,
13434 &prod_id_asic_rev
);
13436 tp
->pci_chip_rev_id
= prod_id_asic_rev
;
13439 /* Wrong chip ID in 5752 A0. This code can be removed later
13440 * as A0 is not in production.
13442 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5752_A0_HW
)
13443 tp
->pci_chip_rev_id
= CHIPREV_ID_5752_A0
;
13445 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13446 * we need to disable memory and use config. cycles
13447 * only to access all registers. The 5702/03 chips
13448 * can mistakenly decode the special cycles from the
13449 * ICH chipsets as memory write cycles, causing corruption
13450 * of register and memory space. Only certain ICH bridges
13451 * will drive special cycles with non-zero data during the
13452 * address phase which can fall within the 5703's address
13453 * range. This is not an ICH bug as the PCI spec allows
13454 * non-zero address during special cycles. However, only
13455 * these ICH bridges are known to drive non-zero addresses
13456 * during special cycles.
13458 * Since special cycles do not cross PCI bridges, we only
13459 * enable this workaround if the 5703 is on the secondary
13460 * bus of these ICH bridges.
13462 if ((tp
->pci_chip_rev_id
== CHIPREV_ID_5703_A1
) ||
13463 (tp
->pci_chip_rev_id
== CHIPREV_ID_5703_A2
)) {
13464 static struct tg3_dev_id
{
13468 } ich_chipsets
[] = {
13469 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801AA_8
,
13471 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801AB_8
,
13473 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801BA_11
,
13475 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801BA_6
,
13479 struct tg3_dev_id
*pci_id
= &ich_chipsets
[0];
13480 struct pci_dev
*bridge
= NULL
;
13482 while (pci_id
->vendor
!= 0) {
13483 bridge
= pci_get_device(pci_id
->vendor
, pci_id
->device
,
13489 if (pci_id
->rev
!= PCI_ANY_ID
) {
13490 if (bridge
->revision
> pci_id
->rev
)
13493 if (bridge
->subordinate
&&
13494 (bridge
->subordinate
->number
==
13495 tp
->pdev
->bus
->number
)) {
13496 tg3_flag_set(tp
, ICH_WORKAROUND
);
13497 pci_dev_put(bridge
);
13503 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
13504 static struct tg3_dev_id
{
13507 } bridge_chipsets
[] = {
13508 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_PXH_0
},
13509 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_PXH_1
},
13512 struct tg3_dev_id
*pci_id
= &bridge_chipsets
[0];
13513 struct pci_dev
*bridge
= NULL
;
13515 while (pci_id
->vendor
!= 0) {
13516 bridge
= pci_get_device(pci_id
->vendor
,
13523 if (bridge
->subordinate
&&
13524 (bridge
->subordinate
->number
<=
13525 tp
->pdev
->bus
->number
) &&
13526 (bridge
->subordinate
->subordinate
>=
13527 tp
->pdev
->bus
->number
)) {
13528 tg3_flag_set(tp
, 5701_DMA_BUG
);
13529 pci_dev_put(bridge
);
13535 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13536 * DMA addresses > 40-bit. This bridge may have other additional
13537 * 57xx devices behind it in some 4-port NIC designs for example.
13538 * Any tg3 device found behind the bridge will also need the 40-bit
13541 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5780
||
13542 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) {
13543 tg3_flag_set(tp
, 5780_CLASS
);
13544 tg3_flag_set(tp
, 40BIT_DMA_BUG
);
13545 tp
->msi_cap
= pci_find_capability(tp
->pdev
, PCI_CAP_ID_MSI
);
13547 struct pci_dev
*bridge
= NULL
;
13550 bridge
= pci_get_device(PCI_VENDOR_ID_SERVERWORKS
,
13551 PCI_DEVICE_ID_SERVERWORKS_EPB
,
13553 if (bridge
&& bridge
->subordinate
&&
13554 (bridge
->subordinate
->number
<=
13555 tp
->pdev
->bus
->number
) &&
13556 (bridge
->subordinate
->subordinate
>=
13557 tp
->pdev
->bus
->number
)) {
13558 tg3_flag_set(tp
, 40BIT_DMA_BUG
);
13559 pci_dev_put(bridge
);
13565 /* Initialize misc host control in PCI block. */
13566 tp
->misc_host_ctrl
|= (misc_ctrl_reg
&
13567 MISC_HOST_CTRL_CHIPREV
);
13568 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
13569 tp
->misc_host_ctrl
);
13571 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
||
13572 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
||
13573 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
||
13574 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
)
13575 tp
->pdev_peer
= tg3_find_peer(tp
);
13577 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
||
13578 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
||
13579 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
)
13580 tg3_flag_set(tp
, 5717_PLUS
);
13582 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
||
13583 tg3_flag(tp
, 5717_PLUS
))
13584 tg3_flag_set(tp
, 57765_PLUS
);
13586 /* Intentionally exclude ASIC_REV_5906 */
13587 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
13588 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
||
13589 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
13590 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
||
13591 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
||
13592 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
||
13593 tg3_flag(tp
, 57765_PLUS
))
13594 tg3_flag_set(tp
, 5755_PLUS
);
13596 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
||
13597 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
||
13598 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
||
13599 tg3_flag(tp
, 5755_PLUS
) ||
13600 tg3_flag(tp
, 5780_CLASS
))
13601 tg3_flag_set(tp
, 5750_PLUS
);
13603 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
||
13604 tg3_flag(tp
, 5750_PLUS
))
13605 tg3_flag_set(tp
, 5705_PLUS
);
13607 /* Determine TSO capabilities */
13608 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
)
13609 ; /* Do nothing. HW bug. */
13610 else if (tg3_flag(tp
, 57765_PLUS
))
13611 tg3_flag_set(tp
, HW_TSO_3
);
13612 else if (tg3_flag(tp
, 5755_PLUS
) ||
13613 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
13614 tg3_flag_set(tp
, HW_TSO_2
);
13615 else if (tg3_flag(tp
, 5750_PLUS
)) {
13616 tg3_flag_set(tp
, HW_TSO_1
);
13617 tg3_flag_set(tp
, TSO_BUG
);
13618 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
&&
13619 tp
->pci_chip_rev_id
>= CHIPREV_ID_5750_C2
)
13620 tg3_flag_clear(tp
, TSO_BUG
);
13621 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
13622 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
&&
13623 tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A0
) {
13624 tg3_flag_set(tp
, TSO_BUG
);
13625 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
)
13626 tp
->fw_needed
= FIRMWARE_TG3TSO5
;
13628 tp
->fw_needed
= FIRMWARE_TG3TSO
;
13631 /* Selectively allow TSO based on operating conditions */
13632 if (tg3_flag(tp
, HW_TSO_1
) ||
13633 tg3_flag(tp
, HW_TSO_2
) ||
13634 tg3_flag(tp
, HW_TSO_3
) ||
13635 (tp
->fw_needed
&& !tg3_flag(tp
, ENABLE_ASF
)))
13636 tg3_flag_set(tp
, TSO_CAPABLE
);
13638 tg3_flag_clear(tp
, TSO_CAPABLE
);
13639 tg3_flag_clear(tp
, TSO_BUG
);
13640 tp
->fw_needed
= NULL
;
13643 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
)
13644 tp
->fw_needed
= FIRMWARE_TG3
;
13648 if (tg3_flag(tp
, 5750_PLUS
)) {
13649 tg3_flag_set(tp
, SUPPORT_MSI
);
13650 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5750_AX
||
13651 GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5750_BX
||
13652 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
&&
13653 tp
->pci_chip_rev_id
<= CHIPREV_ID_5714_A2
&&
13654 tp
->pdev_peer
== tp
->pdev
))
13655 tg3_flag_clear(tp
, SUPPORT_MSI
);
13657 if (tg3_flag(tp
, 5755_PLUS
) ||
13658 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
13659 tg3_flag_set(tp
, 1SHOT_MSI
);
13662 if (tg3_flag(tp
, 57765_PLUS
)) {
13663 tg3_flag_set(tp
, SUPPORT_MSIX
);
13664 tp
->irq_max
= TG3_IRQ_MAX_VECS
;
13668 /* All chips can get confused if TX buffers
13669 * straddle the 4GB address boundary.
13671 tg3_flag_set(tp
, 4G_DMA_BNDRY_BUG
);
13673 if (tg3_flag(tp
, 5755_PLUS
))
13674 tg3_flag_set(tp
, SHORT_DMA_BUG
);
13676 tg3_flag_set(tp
, 40BIT_DMA_LIMIT_BUG
);
13678 if (tg3_flag(tp
, 5717_PLUS
))
13679 tg3_flag_set(tp
, LRG_PROD_RING_CAP
);
13681 if (tg3_flag(tp
, 57765_PLUS
) &&
13682 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5719
)
13683 tg3_flag_set(tp
, USE_JUMBO_BDFLAG
);
13685 if (!tg3_flag(tp
, 5705_PLUS
) ||
13686 tg3_flag(tp
, 5780_CLASS
) ||
13687 tg3_flag(tp
, USE_JUMBO_BDFLAG
))
13688 tg3_flag_set(tp
, JUMBO_CAPABLE
);
13690 pci_read_config_dword(tp
->pdev
, TG3PCI_PCISTATE
,
13693 tp
->pcie_cap
= pci_find_capability(tp
->pdev
, PCI_CAP_ID_EXP
);
13694 if (tp
->pcie_cap
!= 0) {
13697 tg3_flag_set(tp
, PCI_EXPRESS
);
13699 tp
->pcie_readrq
= 4096;
13700 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
||
13701 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
)
13702 tp
->pcie_readrq
= 2048;
13704 pcie_set_readrq(tp
->pdev
, tp
->pcie_readrq
);
13706 pci_read_config_word(tp
->pdev
,
13707 tp
->pcie_cap
+ PCI_EXP_LNKCTL
,
13709 if (lnkctl
& PCI_EXP_LNKCTL_CLKREQ_EN
) {
13710 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
13711 tg3_flag_clear(tp
, HW_TSO_2
);
13712 tg3_flag_clear(tp
, TSO_CAPABLE
);
13713 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
13714 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
||
13715 tp
->pci_chip_rev_id
== CHIPREV_ID_57780_A0
||
13716 tp
->pci_chip_rev_id
== CHIPREV_ID_57780_A1
)
13717 tg3_flag_set(tp
, CLKREQ_BUG
);
13718 } else if (tp
->pci_chip_rev_id
== CHIPREV_ID_5717_A0
) {
13719 tg3_flag_set(tp
, L1PLLPD_EN
);
13721 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
) {
13722 tg3_flag_set(tp
, PCI_EXPRESS
);
13723 } else if (!tg3_flag(tp
, 5705_PLUS
) ||
13724 tg3_flag(tp
, 5780_CLASS
)) {
13725 tp
->pcix_cap
= pci_find_capability(tp
->pdev
, PCI_CAP_ID_PCIX
);
13726 if (!tp
->pcix_cap
) {
13727 dev_err(&tp
->pdev
->dev
,
13728 "Cannot find PCI-X capability, aborting\n");
13732 if (!(pci_state_reg
& PCISTATE_CONV_PCI_MODE
))
13733 tg3_flag_set(tp
, PCIX_MODE
);
13736 /* If we have an AMD 762 or VIA K8T800 chipset, write
13737 * reordering to the mailbox registers done by the host
13738 * controller can cause major troubles. We read back from
13739 * every mailbox register write to force the writes to be
13740 * posted to the chip in order.
13742 if (pci_dev_present(tg3_write_reorder_chipsets
) &&
13743 !tg3_flag(tp
, PCI_EXPRESS
))
13744 tg3_flag_set(tp
, MBOX_WRITE_REORDER
);
13746 pci_read_config_byte(tp
->pdev
, PCI_CACHE_LINE_SIZE
,
13747 &tp
->pci_cacheline_sz
);
13748 pci_read_config_byte(tp
->pdev
, PCI_LATENCY_TIMER
,
13749 &tp
->pci_lat_timer
);
13750 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
&&
13751 tp
->pci_lat_timer
< 64) {
13752 tp
->pci_lat_timer
= 64;
13753 pci_write_config_byte(tp
->pdev
, PCI_LATENCY_TIMER
,
13754 tp
->pci_lat_timer
);
13757 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5700_BX
) {
13758 /* 5700 BX chips need to have their TX producer index
13759 * mailboxes written twice to workaround a bug.
13761 tg3_flag_set(tp
, TXD_MBOX_HWBUG
);
13763 /* If we are in PCI-X mode, enable register write workaround.
13765 * The workaround is to use indirect register accesses
13766 * for all chip writes not to mailbox registers.
13768 if (tg3_flag(tp
, PCIX_MODE
)) {
13771 tg3_flag_set(tp
, PCIX_TARGET_HWBUG
);
13773 /* The chip can have it's power management PCI config
13774 * space registers clobbered due to this bug.
13775 * So explicitly force the chip into D0 here.
13777 pci_read_config_dword(tp
->pdev
,
13778 tp
->pm_cap
+ PCI_PM_CTRL
,
13780 pm_reg
&= ~PCI_PM_CTRL_STATE_MASK
;
13781 pm_reg
|= PCI_PM_CTRL_PME_ENABLE
| 0 /* D0 */;
13782 pci_write_config_dword(tp
->pdev
,
13783 tp
->pm_cap
+ PCI_PM_CTRL
,
13786 /* Also, force SERR#/PERR# in PCI command. */
13787 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
13788 pci_cmd
|= PCI_COMMAND_PARITY
| PCI_COMMAND_SERR
;
13789 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
13793 if ((pci_state_reg
& PCISTATE_BUS_SPEED_HIGH
) != 0)
13794 tg3_flag_set(tp
, PCI_HIGH_SPEED
);
13795 if ((pci_state_reg
& PCISTATE_BUS_32BIT
) != 0)
13796 tg3_flag_set(tp
, PCI_32BIT
);
13798 /* Chip-specific fixup from Broadcom driver */
13799 if ((tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
) &&
13800 (!(pci_state_reg
& PCISTATE_RETRY_SAME_DMA
))) {
13801 pci_state_reg
|= PCISTATE_RETRY_SAME_DMA
;
13802 pci_write_config_dword(tp
->pdev
, TG3PCI_PCISTATE
, pci_state_reg
);
13805 /* Default fast path register access methods */
13806 tp
->read32
= tg3_read32
;
13807 tp
->write32
= tg3_write32
;
13808 tp
->read32_mbox
= tg3_read32
;
13809 tp
->write32_mbox
= tg3_write32
;
13810 tp
->write32_tx_mbox
= tg3_write32
;
13811 tp
->write32_rx_mbox
= tg3_write32
;
13813 /* Various workaround register access methods */
13814 if (tg3_flag(tp
, PCIX_TARGET_HWBUG
))
13815 tp
->write32
= tg3_write_indirect_reg32
;
13816 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
||
13817 (tg3_flag(tp
, PCI_EXPRESS
) &&
13818 tp
->pci_chip_rev_id
== CHIPREV_ID_5750_A0
)) {
13820 * Back to back register writes can cause problems on these
13821 * chips, the workaround is to read back all reg writes
13822 * except those to mailbox regs.
13824 * See tg3_write_indirect_reg32().
13826 tp
->write32
= tg3_write_flush_reg32
;
13829 if (tg3_flag(tp
, TXD_MBOX_HWBUG
) || tg3_flag(tp
, MBOX_WRITE_REORDER
)) {
13830 tp
->write32_tx_mbox
= tg3_write32_tx_mbox
;
13831 if (tg3_flag(tp
, MBOX_WRITE_REORDER
))
13832 tp
->write32_rx_mbox
= tg3_write_flush_reg32
;
13835 if (tg3_flag(tp
, ICH_WORKAROUND
)) {
13836 tp
->read32
= tg3_read_indirect_reg32
;
13837 tp
->write32
= tg3_write_indirect_reg32
;
13838 tp
->read32_mbox
= tg3_read_indirect_mbox
;
13839 tp
->write32_mbox
= tg3_write_indirect_mbox
;
13840 tp
->write32_tx_mbox
= tg3_write_indirect_mbox
;
13841 tp
->write32_rx_mbox
= tg3_write_indirect_mbox
;
13846 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
13847 pci_cmd
&= ~PCI_COMMAND_MEMORY
;
13848 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
13850 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
13851 tp
->read32_mbox
= tg3_read32_mbox_5906
;
13852 tp
->write32_mbox
= tg3_write32_mbox_5906
;
13853 tp
->write32_tx_mbox
= tg3_write32_mbox_5906
;
13854 tp
->write32_rx_mbox
= tg3_write32_mbox_5906
;
13857 if (tp
->write32
== tg3_write_indirect_reg32
||
13858 (tg3_flag(tp
, PCIX_MODE
) &&
13859 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
13860 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
)))
13861 tg3_flag_set(tp
, SRAM_USE_CONFIG
);
13863 /* Get eeprom hw config before calling tg3_set_power_state().
13864 * In particular, the TG3_FLAG_IS_NIC flag must be
13865 * determined before calling tg3_set_power_state() so that
13866 * we know whether or not to switch out of Vaux power.
13867 * When the flag is set, it means that GPIO1 is used for eeprom
13868 * write protect and also implies that it is a LOM where GPIOs
13869 * are not used to switch power.
13871 tg3_get_eeprom_hw_cfg(tp
);
13873 if (tg3_flag(tp
, ENABLE_APE
)) {
13874 /* Allow reads and writes to the
13875 * APE register and memory space.
13877 pci_state_reg
|= PCISTATE_ALLOW_APE_CTLSPC_WR
|
13878 PCISTATE_ALLOW_APE_SHMEM_WR
|
13879 PCISTATE_ALLOW_APE_PSPACE_WR
;
13880 pci_write_config_dword(tp
->pdev
, TG3PCI_PCISTATE
,
13884 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
13885 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
||
13886 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
||
13887 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
||
13888 tg3_flag(tp
, 57765_PLUS
))
13889 tg3_flag_set(tp
, CPMU_PRESENT
);
13891 /* Set up tp->grc_local_ctrl before calling tg3_power_up().
13892 * GPIO1 driven high will bring 5700's external PHY out of reset.
13893 * It is also used as eeprom write protect on LOMs.
13895 tp
->grc_local_ctrl
= GRC_LCLCTRL_INT_ON_ATTN
| GRC_LCLCTRL_AUTO_SEEPROM
;
13896 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
13897 tg3_flag(tp
, EEPROM_WRITE_PROT
))
13898 tp
->grc_local_ctrl
|= (GRC_LCLCTRL_GPIO_OE1
|
13899 GRC_LCLCTRL_GPIO_OUTPUT1
);
13900 /* Unused GPIO3 must be driven as output on 5752 because there
13901 * are no pull-up resistors on unused GPIO pins.
13903 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
)
13904 tp
->grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE3
;
13906 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
13907 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
||
13908 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
)
13909 tp
->grc_local_ctrl
|= GRC_LCLCTRL_GPIO_UART_SEL
;
13911 if (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5761
||
13912 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5761S
) {
13913 /* Turn off the debug UART. */
13914 tp
->grc_local_ctrl
|= GRC_LCLCTRL_GPIO_UART_SEL
;
13915 if (tg3_flag(tp
, IS_NIC
))
13916 /* Keep VMain power. */
13917 tp
->grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE0
|
13918 GRC_LCLCTRL_GPIO_OUTPUT0
;
13921 /* Force the chip into D0. */
13922 err
= tg3_power_up(tp
);
13924 dev_err(&tp
->pdev
->dev
, "Transition to D0 failed\n");
13928 /* Derive initial jumbo mode from MTU assigned in
13929 * ether_setup() via the alloc_etherdev() call
13931 if (tp
->dev
->mtu
> ETH_DATA_LEN
&& !tg3_flag(tp
, 5780_CLASS
))
13932 tg3_flag_set(tp
, JUMBO_RING_ENABLE
);
13934 /* Determine WakeOnLan speed to use. */
13935 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
13936 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
13937 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
||
13938 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B2
) {
13939 tg3_flag_clear(tp
, WOL_SPEED_100MB
);
13941 tg3_flag_set(tp
, WOL_SPEED_100MB
);
13944 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
13945 tp
->phy_flags
|= TG3_PHYFLG_IS_FET
;
13947 /* A few boards don't want Ethernet@WireSpeed phy feature */
13948 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
13949 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
13950 (tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A0
) &&
13951 (tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A1
)) ||
13952 (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) ||
13953 (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
))
13954 tp
->phy_flags
|= TG3_PHYFLG_NO_ETH_WIRE_SPEED
;
13956 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5703_AX
||
13957 GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5704_AX
)
13958 tp
->phy_flags
|= TG3_PHYFLG_ADC_BUG
;
13959 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
)
13960 tp
->phy_flags
|= TG3_PHYFLG_5704_A0_BUG
;
13962 if (tg3_flag(tp
, 5705_PLUS
) &&
13963 !(tp
->phy_flags
& TG3_PHYFLG_IS_FET
) &&
13964 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5785
&&
13965 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_57780
&&
13966 !tg3_flag(tp
, 57765_PLUS
)) {
13967 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
13968 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
||
13969 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
13970 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
) {
13971 if (tp
->pdev
->device
!= PCI_DEVICE_ID_TIGON3_5756
&&
13972 tp
->pdev
->device
!= PCI_DEVICE_ID_TIGON3_5722
)
13973 tp
->phy_flags
|= TG3_PHYFLG_JITTER_BUG
;
13974 if (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5755M
)
13975 tp
->phy_flags
|= TG3_PHYFLG_ADJUST_TRIM
;
13977 tp
->phy_flags
|= TG3_PHYFLG_BER_BUG
;
13980 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
&&
13981 GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5784_AX
) {
13982 tp
->phy_otp
= tg3_read_otp_phycfg(tp
);
13983 if (tp
->phy_otp
== 0)
13984 tp
->phy_otp
= TG3_OTP_DEFAULT
;
13987 if (tg3_flag(tp
, CPMU_PRESENT
))
13988 tp
->mi_mode
= MAC_MI_MODE_500KHZ_CONST
;
13990 tp
->mi_mode
= MAC_MI_MODE_BASE
;
13992 tp
->coalesce_mode
= 0;
13993 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5700_AX
&&
13994 GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5700_BX
)
13995 tp
->coalesce_mode
|= HOSTCC_MODE_32BYTE
;
13997 /* Set these bits to enable statistics workaround. */
13998 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
||
13999 tp
->pci_chip_rev_id
== CHIPREV_ID_5719_A0
||
14000 tp
->pci_chip_rev_id
== CHIPREV_ID_5720_A0
) {
14001 tp
->coalesce_mode
|= HOSTCC_MODE_ATTN
;
14002 tp
->grc_mode
|= GRC_MODE_IRQ_ON_FLOW_ATTN
;
14005 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
||
14006 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
)
14007 tg3_flag_set(tp
, USE_PHYLIB
);
14009 err
= tg3_mdio_init(tp
);
14013 /* Initialize data/descriptor byte/word swapping. */
14014 val
= tr32(GRC_MODE
);
14015 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
)
14016 val
&= (GRC_MODE_BYTE_SWAP_B2HRX_DATA
|
14017 GRC_MODE_WORD_SWAP_B2HRX_DATA
|
14018 GRC_MODE_B2HRX_ENABLE
|
14019 GRC_MODE_HTX2B_ENABLE
|
14020 GRC_MODE_HOST_STACKUP
);
14022 val
&= GRC_MODE_HOST_STACKUP
;
14024 tw32(GRC_MODE
, val
| tp
->grc_mode
);
14026 tg3_switch_clocks(tp
);
14028 /* Clear this out for sanity. */
14029 tw32(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
14031 pci_read_config_dword(tp
->pdev
, TG3PCI_PCISTATE
,
14033 if ((pci_state_reg
& PCISTATE_CONV_PCI_MODE
) == 0 &&
14034 !tg3_flag(tp
, PCIX_TARGET_HWBUG
)) {
14035 u32 chiprevid
= GET_CHIP_REV_ID(tp
->misc_host_ctrl
);
14037 if (chiprevid
== CHIPREV_ID_5701_A0
||
14038 chiprevid
== CHIPREV_ID_5701_B0
||
14039 chiprevid
== CHIPREV_ID_5701_B2
||
14040 chiprevid
== CHIPREV_ID_5701_B5
) {
14041 void __iomem
*sram_base
;
14043 /* Write some dummy words into the SRAM status block
14044 * area, see if it reads back correctly. If the return
14045 * value is bad, force enable the PCIX workaround.
14047 sram_base
= tp
->regs
+ NIC_SRAM_WIN_BASE
+ NIC_SRAM_STATS_BLK
;
14049 writel(0x00000000, sram_base
);
14050 writel(0x00000000, sram_base
+ 4);
14051 writel(0xffffffff, sram_base
+ 4);
14052 if (readl(sram_base
) != 0x00000000)
14053 tg3_flag_set(tp
, PCIX_TARGET_HWBUG
);
14058 tg3_nvram_init(tp
);
14060 grc_misc_cfg
= tr32(GRC_MISC_CFG
);
14061 grc_misc_cfg
&= GRC_MISC_CFG_BOARD_ID_MASK
;
14063 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
14064 (grc_misc_cfg
== GRC_MISC_CFG_BOARD_ID_5788
||
14065 grc_misc_cfg
== GRC_MISC_CFG_BOARD_ID_5788M
))
14066 tg3_flag_set(tp
, IS_5788
);
14068 if (!tg3_flag(tp
, IS_5788
) &&
14069 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
)
14070 tg3_flag_set(tp
, TAGGED_STATUS
);
14071 if (tg3_flag(tp
, TAGGED_STATUS
)) {
14072 tp
->coalesce_mode
|= (HOSTCC_MODE_CLRTICK_RXBD
|
14073 HOSTCC_MODE_CLRTICK_TXBD
);
14075 tp
->misc_host_ctrl
|= MISC_HOST_CTRL_TAGGED_STATUS
;
14076 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
14077 tp
->misc_host_ctrl
);
14080 /* Preserve the APE MAC_MODE bits */
14081 if (tg3_flag(tp
, ENABLE_APE
))
14082 tp
->mac_mode
= MAC_MODE_APE_TX_EN
| MAC_MODE_APE_RX_EN
;
14084 tp
->mac_mode
= TG3_DEF_MAC_MODE
;
14086 /* these are limited to 10/100 only */
14087 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
&&
14088 (grc_misc_cfg
== 0x8000 || grc_misc_cfg
== 0x4000)) ||
14089 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
14090 tp
->pdev
->vendor
== PCI_VENDOR_ID_BROADCOM
&&
14091 (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5901
||
14092 tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5901_2
||
14093 tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5705F
)) ||
14094 (tp
->pdev
->vendor
== PCI_VENDOR_ID_BROADCOM
&&
14095 (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5751F
||
14096 tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5753F
||
14097 tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5787F
)) ||
14098 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57790
||
14099 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57791
||
14100 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57795
||
14101 (tp
->phy_flags
& TG3_PHYFLG_IS_FET
))
14102 tp
->phy_flags
|= TG3_PHYFLG_10_100_ONLY
;
14104 err
= tg3_phy_probe(tp
);
14106 dev_err(&tp
->pdev
->dev
, "phy probe failed, err %d\n", err
);
14107 /* ... but do not return immediately ... */
14112 tg3_read_fw_ver(tp
);
14114 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
14115 tp
->phy_flags
&= ~TG3_PHYFLG_USE_MI_INTERRUPT
;
14117 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
)
14118 tp
->phy_flags
|= TG3_PHYFLG_USE_MI_INTERRUPT
;
14120 tp
->phy_flags
&= ~TG3_PHYFLG_USE_MI_INTERRUPT
;
14123 /* 5700 {AX,BX} chips have a broken status block link
14124 * change bit implementation, so we must use the
14125 * status register in those cases.
14127 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
)
14128 tg3_flag_set(tp
, USE_LINKCHG_REG
);
14130 tg3_flag_clear(tp
, USE_LINKCHG_REG
);
14132 /* The led_ctrl is set during tg3_phy_probe, here we might
14133 * have to force the link status polling mechanism based
14134 * upon subsystem IDs.
14136 if (tp
->pdev
->subsystem_vendor
== PCI_VENDOR_ID_DELL
&&
14137 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
&&
14138 !(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)) {
14139 tp
->phy_flags
|= TG3_PHYFLG_USE_MI_INTERRUPT
;
14140 tg3_flag_set(tp
, USE_LINKCHG_REG
);
14143 /* For all SERDES we poll the MAC status register. */
14144 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
14145 tg3_flag_set(tp
, POLL_SERDES
);
14147 tg3_flag_clear(tp
, POLL_SERDES
);
14149 tp
->rx_offset
= NET_IP_ALIGN
;
14150 tp
->rx_copy_thresh
= TG3_RX_COPY_THRESHOLD
;
14151 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
&&
14152 tg3_flag(tp
, PCIX_MODE
)) {
14154 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14155 tp
->rx_copy_thresh
= ~(u16
)0;
14159 tp
->rx_std_ring_mask
= TG3_RX_STD_RING_SIZE(tp
) - 1;
14160 tp
->rx_jmb_ring_mask
= TG3_RX_JMB_RING_SIZE(tp
) - 1;
14161 tp
->rx_ret_ring_mask
= tg3_rx_ret_ring_size(tp
) - 1;
14163 tp
->rx_std_max_post
= tp
->rx_std_ring_mask
+ 1;
14165 /* Increment the rx prod index on the rx std ring by at most
14166 * 8 for these chips to workaround hw errata.
14168 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
||
14169 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
||
14170 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
)
14171 tp
->rx_std_max_post
= 8;
14173 if (tg3_flag(tp
, ASPM_WORKAROUND
))
14174 tp
->pwrmgmt_thresh
= tr32(PCIE_PWR_MGMT_THRESH
) &
14175 PCIE_PWR_MGMT_L1_THRESH_MSK
;
14180 #ifdef CONFIG_SPARC
14181 static int __devinit
tg3_get_macaddr_sparc(struct tg3
*tp
)
14183 struct net_device
*dev
= tp
->dev
;
14184 struct pci_dev
*pdev
= tp
->pdev
;
14185 struct device_node
*dp
= pci_device_to_OF_node(pdev
);
14186 const unsigned char *addr
;
14189 addr
= of_get_property(dp
, "local-mac-address", &len
);
14190 if (addr
&& len
== 6) {
14191 memcpy(dev
->dev_addr
, addr
, 6);
14192 memcpy(dev
->perm_addr
, dev
->dev_addr
, 6);
14198 static int __devinit
tg3_get_default_macaddr_sparc(struct tg3
*tp
)
14200 struct net_device
*dev
= tp
->dev
;
14202 memcpy(dev
->dev_addr
, idprom
->id_ethaddr
, 6);
14203 memcpy(dev
->perm_addr
, idprom
->id_ethaddr
, 6);
14208 static int __devinit
tg3_get_device_address(struct tg3
*tp
)
14210 struct net_device
*dev
= tp
->dev
;
14211 u32 hi
, lo
, mac_offset
;
14214 #ifdef CONFIG_SPARC
14215 if (!tg3_get_macaddr_sparc(tp
))
14220 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
||
14221 tg3_flag(tp
, 5780_CLASS
)) {
14222 if (tr32(TG3PCI_DUAL_MAC_CTRL
) & DUAL_MAC_CTRL_ID
)
14224 if (tg3_nvram_lock(tp
))
14225 tw32_f(NVRAM_CMD
, NVRAM_CMD_RESET
);
14227 tg3_nvram_unlock(tp
);
14228 } else if (tg3_flag(tp
, 5717_PLUS
)) {
14229 if (PCI_FUNC(tp
->pdev
->devfn
) & 1)
14231 if (PCI_FUNC(tp
->pdev
->devfn
) > 1)
14232 mac_offset
+= 0x18c;
14233 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
14236 /* First try to get it from MAC address mailbox. */
14237 tg3_read_mem(tp
, NIC_SRAM_MAC_ADDR_HIGH_MBOX
, &hi
);
14238 if ((hi
>> 16) == 0x484b) {
14239 dev
->dev_addr
[0] = (hi
>> 8) & 0xff;
14240 dev
->dev_addr
[1] = (hi
>> 0) & 0xff;
14242 tg3_read_mem(tp
, NIC_SRAM_MAC_ADDR_LOW_MBOX
, &lo
);
14243 dev
->dev_addr
[2] = (lo
>> 24) & 0xff;
14244 dev
->dev_addr
[3] = (lo
>> 16) & 0xff;
14245 dev
->dev_addr
[4] = (lo
>> 8) & 0xff;
14246 dev
->dev_addr
[5] = (lo
>> 0) & 0xff;
14248 /* Some old bootcode may report a 0 MAC address in SRAM */
14249 addr_ok
= is_valid_ether_addr(&dev
->dev_addr
[0]);
14252 /* Next, try NVRAM. */
14253 if (!tg3_flag(tp
, NO_NVRAM
) &&
14254 !tg3_nvram_read_be32(tp
, mac_offset
+ 0, &hi
) &&
14255 !tg3_nvram_read_be32(tp
, mac_offset
+ 4, &lo
)) {
14256 memcpy(&dev
->dev_addr
[0], ((char *)&hi
) + 2, 2);
14257 memcpy(&dev
->dev_addr
[2], (char *)&lo
, sizeof(lo
));
14259 /* Finally just fetch it out of the MAC control regs. */
14261 hi
= tr32(MAC_ADDR_0_HIGH
);
14262 lo
= tr32(MAC_ADDR_0_LOW
);
14264 dev
->dev_addr
[5] = lo
& 0xff;
14265 dev
->dev_addr
[4] = (lo
>> 8) & 0xff;
14266 dev
->dev_addr
[3] = (lo
>> 16) & 0xff;
14267 dev
->dev_addr
[2] = (lo
>> 24) & 0xff;
14268 dev
->dev_addr
[1] = hi
& 0xff;
14269 dev
->dev_addr
[0] = (hi
>> 8) & 0xff;
14273 if (!is_valid_ether_addr(&dev
->dev_addr
[0])) {
14274 #ifdef CONFIG_SPARC
14275 if (!tg3_get_default_macaddr_sparc(tp
))
14280 memcpy(dev
->perm_addr
, dev
->dev_addr
, dev
->addr_len
);
14284 #define BOUNDARY_SINGLE_CACHELINE 1
14285 #define BOUNDARY_MULTI_CACHELINE 2
14287 static u32 __devinit
tg3_calc_dma_bndry(struct tg3
*tp
, u32 val
)
14289 int cacheline_size
;
14293 pci_read_config_byte(tp
->pdev
, PCI_CACHE_LINE_SIZE
, &byte
);
14295 cacheline_size
= 1024;
14297 cacheline_size
= (int) byte
* 4;
14299 /* On 5703 and later chips, the boundary bits have no
14302 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
14303 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
&&
14304 !tg3_flag(tp
, PCI_EXPRESS
))
14307 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14308 goal
= BOUNDARY_MULTI_CACHELINE
;
14310 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14311 goal
= BOUNDARY_SINGLE_CACHELINE
;
14317 if (tg3_flag(tp
, 57765_PLUS
)) {
14318 val
= goal
? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT
;
14325 /* PCI controllers on most RISC systems tend to disconnect
14326 * when a device tries to burst across a cache-line boundary.
14327 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14329 * Unfortunately, for PCI-E there are only limited
14330 * write-side controls for this, and thus for reads
14331 * we will still get the disconnects. We'll also waste
14332 * these PCI cycles for both read and write for chips
14333 * other than 5700 and 5701 which do not implement the
14336 if (tg3_flag(tp
, PCIX_MODE
) && !tg3_flag(tp
, PCI_EXPRESS
)) {
14337 switch (cacheline_size
) {
14342 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
14343 val
|= (DMA_RWCTRL_READ_BNDRY_128_PCIX
|
14344 DMA_RWCTRL_WRITE_BNDRY_128_PCIX
);
14346 val
|= (DMA_RWCTRL_READ_BNDRY_384_PCIX
|
14347 DMA_RWCTRL_WRITE_BNDRY_384_PCIX
);
14352 val
|= (DMA_RWCTRL_READ_BNDRY_256_PCIX
|
14353 DMA_RWCTRL_WRITE_BNDRY_256_PCIX
);
14357 val
|= (DMA_RWCTRL_READ_BNDRY_384_PCIX
|
14358 DMA_RWCTRL_WRITE_BNDRY_384_PCIX
);
14361 } else if (tg3_flag(tp
, PCI_EXPRESS
)) {
14362 switch (cacheline_size
) {
14366 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
14367 val
&= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE
;
14368 val
|= DMA_RWCTRL_WRITE_BNDRY_64_PCIE
;
14374 val
&= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE
;
14375 val
|= DMA_RWCTRL_WRITE_BNDRY_128_PCIE
;
14379 switch (cacheline_size
) {
14381 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
14382 val
|= (DMA_RWCTRL_READ_BNDRY_16
|
14383 DMA_RWCTRL_WRITE_BNDRY_16
);
14388 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
14389 val
|= (DMA_RWCTRL_READ_BNDRY_32
|
14390 DMA_RWCTRL_WRITE_BNDRY_32
);
14395 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
14396 val
|= (DMA_RWCTRL_READ_BNDRY_64
|
14397 DMA_RWCTRL_WRITE_BNDRY_64
);
14402 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
14403 val
|= (DMA_RWCTRL_READ_BNDRY_128
|
14404 DMA_RWCTRL_WRITE_BNDRY_128
);
14409 val
|= (DMA_RWCTRL_READ_BNDRY_256
|
14410 DMA_RWCTRL_WRITE_BNDRY_256
);
14413 val
|= (DMA_RWCTRL_READ_BNDRY_512
|
14414 DMA_RWCTRL_WRITE_BNDRY_512
);
14418 val
|= (DMA_RWCTRL_READ_BNDRY_1024
|
14419 DMA_RWCTRL_WRITE_BNDRY_1024
);
14428 static int __devinit
tg3_do_test_dma(struct tg3
*tp
, u32
*buf
, dma_addr_t buf_dma
, int size
, int to_device
)
14430 struct tg3_internal_buffer_desc test_desc
;
14431 u32 sram_dma_descs
;
14434 sram_dma_descs
= NIC_SRAM_DMA_DESC_POOL_BASE
;
14436 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ
, 0);
14437 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ
, 0);
14438 tw32(RDMAC_STATUS
, 0);
14439 tw32(WDMAC_STATUS
, 0);
14441 tw32(BUFMGR_MODE
, 0);
14442 tw32(FTQ_RESET
, 0);
14444 test_desc
.addr_hi
= ((u64
) buf_dma
) >> 32;
14445 test_desc
.addr_lo
= buf_dma
& 0xffffffff;
14446 test_desc
.nic_mbuf
= 0x00002100;
14447 test_desc
.len
= size
;
14450 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
14451 * the *second* time the tg3 driver was getting loaded after an
14454 * Broadcom tells me:
14455 * ...the DMA engine is connected to the GRC block and a DMA
14456 * reset may affect the GRC block in some unpredictable way...
14457 * The behavior of resets to individual blocks has not been tested.
14459 * Broadcom noted the GRC reset will also reset all sub-components.
14462 test_desc
.cqid_sqid
= (13 << 8) | 2;
14464 tw32_f(RDMAC_MODE
, RDMAC_MODE_ENABLE
);
14467 test_desc
.cqid_sqid
= (16 << 8) | 7;
14469 tw32_f(WDMAC_MODE
, WDMAC_MODE_ENABLE
);
14472 test_desc
.flags
= 0x00000005;
14474 for (i
= 0; i
< (sizeof(test_desc
) / sizeof(u32
)); i
++) {
14477 val
= *(((u32
*)&test_desc
) + i
);
14478 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
,
14479 sram_dma_descs
+ (i
* sizeof(u32
)));
14480 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_DATA
, val
);
14482 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, 0);
14485 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ
, sram_dma_descs
);
14487 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ
, sram_dma_descs
);
14490 for (i
= 0; i
< 40; i
++) {
14494 val
= tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ
);
14496 val
= tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ
);
14497 if ((val
& 0xffff) == sram_dma_descs
) {
14508 #define TEST_BUFFER_SIZE 0x2000
14510 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets
) = {
14511 { PCI_DEVICE(PCI_VENDOR_ID_APPLE
, PCI_DEVICE_ID_APPLE_UNI_N_PCI15
) },
14515 static int __devinit
tg3_test_dma(struct tg3
*tp
)
14517 dma_addr_t buf_dma
;
14518 u32
*buf
, saved_dma_rwctrl
;
14521 buf
= dma_alloc_coherent(&tp
->pdev
->dev
, TEST_BUFFER_SIZE
,
14522 &buf_dma
, GFP_KERNEL
);
14528 tp
->dma_rwctrl
= ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT
) |
14529 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT
));
14531 tp
->dma_rwctrl
= tg3_calc_dma_bndry(tp
, tp
->dma_rwctrl
);
14533 if (tg3_flag(tp
, 57765_PLUS
))
14536 if (tg3_flag(tp
, PCI_EXPRESS
)) {
14537 /* DMA read watermark not used on PCIE */
14538 tp
->dma_rwctrl
|= 0x00180000;
14539 } else if (!tg3_flag(tp
, PCIX_MODE
)) {
14540 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
||
14541 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
)
14542 tp
->dma_rwctrl
|= 0x003f0000;
14544 tp
->dma_rwctrl
|= 0x003f000f;
14546 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
14547 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
14548 u32 ccval
= (tr32(TG3PCI_CLOCK_CTRL
) & 0x1f);
14549 u32 read_water
= 0x7;
14551 /* If the 5704 is behind the EPB bridge, we can
14552 * do the less restrictive ONE_DMA workaround for
14553 * better performance.
14555 if (tg3_flag(tp
, 40BIT_DMA_BUG
) &&
14556 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
)
14557 tp
->dma_rwctrl
|= 0x8000;
14558 else if (ccval
== 0x6 || ccval
== 0x7)
14559 tp
->dma_rwctrl
|= DMA_RWCTRL_ONE_DMA
;
14561 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
)
14563 /* Set bit 23 to enable PCIX hw bug fix */
14565 (read_water
<< DMA_RWCTRL_READ_WATER_SHIFT
) |
14566 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT
) |
14568 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5780
) {
14569 /* 5780 always in PCIX mode */
14570 tp
->dma_rwctrl
|= 0x00144000;
14571 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) {
14572 /* 5714 always in PCIX mode */
14573 tp
->dma_rwctrl
|= 0x00148000;
14575 tp
->dma_rwctrl
|= 0x001b000f;
14579 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
14580 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
)
14581 tp
->dma_rwctrl
&= 0xfffffff0;
14583 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
14584 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
14585 /* Remove this if it causes problems for some boards. */
14586 tp
->dma_rwctrl
|= DMA_RWCTRL_USE_MEM_READ_MULT
;
14588 /* On 5700/5701 chips, we need to set this bit.
14589 * Otherwise the chip will issue cacheline transactions
14590 * to streamable DMA memory with not all the byte
14591 * enables turned on. This is an error on several
14592 * RISC PCI controllers, in particular sparc64.
14594 * On 5703/5704 chips, this bit has been reassigned
14595 * a different meaning. In particular, it is used
14596 * on those chips to enable a PCI-X workaround.
14598 tp
->dma_rwctrl
|= DMA_RWCTRL_ASSERT_ALL_BE
;
14601 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
14604 /* Unneeded, already done by tg3_get_invariants. */
14605 tg3_switch_clocks(tp
);
14608 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
14609 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
)
14612 /* It is best to perform DMA test with maximum write burst size
14613 * to expose the 5700/5701 write DMA bug.
14615 saved_dma_rwctrl
= tp
->dma_rwctrl
;
14616 tp
->dma_rwctrl
&= ~DMA_RWCTRL_WRITE_BNDRY_MASK
;
14617 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
14622 for (i
= 0; i
< TEST_BUFFER_SIZE
/ sizeof(u32
); i
++)
14625 /* Send the buffer to the chip. */
14626 ret
= tg3_do_test_dma(tp
, buf
, buf_dma
, TEST_BUFFER_SIZE
, 1);
14628 dev_err(&tp
->pdev
->dev
,
14629 "%s: Buffer write failed. err = %d\n",
14635 /* validate data reached card RAM correctly. */
14636 for (i
= 0; i
< TEST_BUFFER_SIZE
/ sizeof(u32
); i
++) {
14638 tg3_read_mem(tp
, 0x2100 + (i
*4), &val
);
14639 if (le32_to_cpu(val
) != p
[i
]) {
14640 dev_err(&tp
->pdev
->dev
,
14641 "%s: Buffer corrupted on device! "
14642 "(%d != %d)\n", __func__
, val
, i
);
14643 /* ret = -ENODEV here? */
14648 /* Now read it back. */
14649 ret
= tg3_do_test_dma(tp
, buf
, buf_dma
, TEST_BUFFER_SIZE
, 0);
14651 dev_err(&tp
->pdev
->dev
, "%s: Buffer read failed. "
14652 "err = %d\n", __func__
, ret
);
14657 for (i
= 0; i
< TEST_BUFFER_SIZE
/ sizeof(u32
); i
++) {
14661 if ((tp
->dma_rwctrl
& DMA_RWCTRL_WRITE_BNDRY_MASK
) !=
14662 DMA_RWCTRL_WRITE_BNDRY_16
) {
14663 tp
->dma_rwctrl
&= ~DMA_RWCTRL_WRITE_BNDRY_MASK
;
14664 tp
->dma_rwctrl
|= DMA_RWCTRL_WRITE_BNDRY_16
;
14665 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
14668 dev_err(&tp
->pdev
->dev
,
14669 "%s: Buffer corrupted on read back! "
14670 "(%d != %d)\n", __func__
, p
[i
], i
);
14676 if (i
== (TEST_BUFFER_SIZE
/ sizeof(u32
))) {
14682 if ((tp
->dma_rwctrl
& DMA_RWCTRL_WRITE_BNDRY_MASK
) !=
14683 DMA_RWCTRL_WRITE_BNDRY_16
) {
14684 /* DMA test passed without adjusting DMA boundary,
14685 * now look for chipsets that are known to expose the
14686 * DMA bug without failing the test.
14688 if (pci_dev_present(tg3_dma_wait_state_chipsets
)) {
14689 tp
->dma_rwctrl
&= ~DMA_RWCTRL_WRITE_BNDRY_MASK
;
14690 tp
->dma_rwctrl
|= DMA_RWCTRL_WRITE_BNDRY_16
;
14692 /* Safe to use the calculated DMA boundary. */
14693 tp
->dma_rwctrl
= saved_dma_rwctrl
;
14696 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
14700 dma_free_coherent(&tp
->pdev
->dev
, TEST_BUFFER_SIZE
, buf
, buf_dma
);
14705 static void __devinit
tg3_init_bufmgr_config(struct tg3
*tp
)
14707 if (tg3_flag(tp
, 57765_PLUS
)) {
14708 tp
->bufmgr_config
.mbuf_read_dma_low_water
=
14709 DEFAULT_MB_RDMA_LOW_WATER_5705
;
14710 tp
->bufmgr_config
.mbuf_mac_rx_low_water
=
14711 DEFAULT_MB_MACRX_LOW_WATER_57765
;
14712 tp
->bufmgr_config
.mbuf_high_water
=
14713 DEFAULT_MB_HIGH_WATER_57765
;
14715 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
=
14716 DEFAULT_MB_RDMA_LOW_WATER_5705
;
14717 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
=
14718 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765
;
14719 tp
->bufmgr_config
.mbuf_high_water_jumbo
=
14720 DEFAULT_MB_HIGH_WATER_JUMBO_57765
;
14721 } else if (tg3_flag(tp
, 5705_PLUS
)) {
14722 tp
->bufmgr_config
.mbuf_read_dma_low_water
=
14723 DEFAULT_MB_RDMA_LOW_WATER_5705
;
14724 tp
->bufmgr_config
.mbuf_mac_rx_low_water
=
14725 DEFAULT_MB_MACRX_LOW_WATER_5705
;
14726 tp
->bufmgr_config
.mbuf_high_water
=
14727 DEFAULT_MB_HIGH_WATER_5705
;
14728 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
14729 tp
->bufmgr_config
.mbuf_mac_rx_low_water
=
14730 DEFAULT_MB_MACRX_LOW_WATER_5906
;
14731 tp
->bufmgr_config
.mbuf_high_water
=
14732 DEFAULT_MB_HIGH_WATER_5906
;
14735 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
=
14736 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780
;
14737 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
=
14738 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780
;
14739 tp
->bufmgr_config
.mbuf_high_water_jumbo
=
14740 DEFAULT_MB_HIGH_WATER_JUMBO_5780
;
14742 tp
->bufmgr_config
.mbuf_read_dma_low_water
=
14743 DEFAULT_MB_RDMA_LOW_WATER
;
14744 tp
->bufmgr_config
.mbuf_mac_rx_low_water
=
14745 DEFAULT_MB_MACRX_LOW_WATER
;
14746 tp
->bufmgr_config
.mbuf_high_water
=
14747 DEFAULT_MB_HIGH_WATER
;
14749 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
=
14750 DEFAULT_MB_RDMA_LOW_WATER_JUMBO
;
14751 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
=
14752 DEFAULT_MB_MACRX_LOW_WATER_JUMBO
;
14753 tp
->bufmgr_config
.mbuf_high_water_jumbo
=
14754 DEFAULT_MB_HIGH_WATER_JUMBO
;
14757 tp
->bufmgr_config
.dma_low_water
= DEFAULT_DMA_LOW_WATER
;
14758 tp
->bufmgr_config
.dma_high_water
= DEFAULT_DMA_HIGH_WATER
;
14761 static char * __devinit
tg3_phy_string(struct tg3
*tp
)
14763 switch (tp
->phy_id
& TG3_PHY_ID_MASK
) {
14764 case TG3_PHY_ID_BCM5400
: return "5400";
14765 case TG3_PHY_ID_BCM5401
: return "5401";
14766 case TG3_PHY_ID_BCM5411
: return "5411";
14767 case TG3_PHY_ID_BCM5701
: return "5701";
14768 case TG3_PHY_ID_BCM5703
: return "5703";
14769 case TG3_PHY_ID_BCM5704
: return "5704";
14770 case TG3_PHY_ID_BCM5705
: return "5705";
14771 case TG3_PHY_ID_BCM5750
: return "5750";
14772 case TG3_PHY_ID_BCM5752
: return "5752";
14773 case TG3_PHY_ID_BCM5714
: return "5714";
14774 case TG3_PHY_ID_BCM5780
: return "5780";
14775 case TG3_PHY_ID_BCM5755
: return "5755";
14776 case TG3_PHY_ID_BCM5787
: return "5787";
14777 case TG3_PHY_ID_BCM5784
: return "5784";
14778 case TG3_PHY_ID_BCM5756
: return "5722/5756";
14779 case TG3_PHY_ID_BCM5906
: return "5906";
14780 case TG3_PHY_ID_BCM5761
: return "5761";
14781 case TG3_PHY_ID_BCM5718C
: return "5718C";
14782 case TG3_PHY_ID_BCM5718S
: return "5718S";
14783 case TG3_PHY_ID_BCM57765
: return "57765";
14784 case TG3_PHY_ID_BCM5719C
: return "5719C";
14785 case TG3_PHY_ID_BCM5720C
: return "5720C";
14786 case TG3_PHY_ID_BCM8002
: return "8002/serdes";
14787 case 0: return "serdes";
14788 default: return "unknown";
14792 static char * __devinit
tg3_bus_string(struct tg3
*tp
, char *str
)
14794 if (tg3_flag(tp
, PCI_EXPRESS
)) {
14795 strcpy(str
, "PCI Express");
14797 } else if (tg3_flag(tp
, PCIX_MODE
)) {
14798 u32 clock_ctrl
= tr32(TG3PCI_CLOCK_CTRL
) & 0x1f;
14800 strcpy(str
, "PCIX:");
14802 if ((clock_ctrl
== 7) ||
14803 ((tr32(GRC_MISC_CFG
) & GRC_MISC_CFG_BOARD_ID_MASK
) ==
14804 GRC_MISC_CFG_BOARD_ID_5704CIOBE
))
14805 strcat(str
, "133MHz");
14806 else if (clock_ctrl
== 0)
14807 strcat(str
, "33MHz");
14808 else if (clock_ctrl
== 2)
14809 strcat(str
, "50MHz");
14810 else if (clock_ctrl
== 4)
14811 strcat(str
, "66MHz");
14812 else if (clock_ctrl
== 6)
14813 strcat(str
, "100MHz");
14815 strcpy(str
, "PCI:");
14816 if (tg3_flag(tp
, PCI_HIGH_SPEED
))
14817 strcat(str
, "66MHz");
14819 strcat(str
, "33MHz");
14821 if (tg3_flag(tp
, PCI_32BIT
))
14822 strcat(str
, ":32-bit");
14824 strcat(str
, ":64-bit");
14828 static struct pci_dev
* __devinit
tg3_find_peer(struct tg3
*tp
)
14830 struct pci_dev
*peer
;
14831 unsigned int func
, devnr
= tp
->pdev
->devfn
& ~7;
14833 for (func
= 0; func
< 8; func
++) {
14834 peer
= pci_get_slot(tp
->pdev
->bus
, devnr
| func
);
14835 if (peer
&& peer
!= tp
->pdev
)
14839 /* 5704 can be configured in single-port mode, set peer to
14840 * tp->pdev in that case.
14848 * We don't need to keep the refcount elevated; there's no way
14849 * to remove one half of this device without removing the other
14856 static void __devinit
tg3_init_coal(struct tg3
*tp
)
14858 struct ethtool_coalesce
*ec
= &tp
->coal
;
14860 memset(ec
, 0, sizeof(*ec
));
14861 ec
->cmd
= ETHTOOL_GCOALESCE
;
14862 ec
->rx_coalesce_usecs
= LOW_RXCOL_TICKS
;
14863 ec
->tx_coalesce_usecs
= LOW_TXCOL_TICKS
;
14864 ec
->rx_max_coalesced_frames
= LOW_RXMAX_FRAMES
;
14865 ec
->tx_max_coalesced_frames
= LOW_TXMAX_FRAMES
;
14866 ec
->rx_coalesce_usecs_irq
= DEFAULT_RXCOAL_TICK_INT
;
14867 ec
->tx_coalesce_usecs_irq
= DEFAULT_TXCOAL_TICK_INT
;
14868 ec
->rx_max_coalesced_frames_irq
= DEFAULT_RXCOAL_MAXF_INT
;
14869 ec
->tx_max_coalesced_frames_irq
= DEFAULT_TXCOAL_MAXF_INT
;
14870 ec
->stats_block_coalesce_usecs
= DEFAULT_STAT_COAL_TICKS
;
14872 if (tp
->coalesce_mode
& (HOSTCC_MODE_CLRTICK_RXBD
|
14873 HOSTCC_MODE_CLRTICK_TXBD
)) {
14874 ec
->rx_coalesce_usecs
= LOW_RXCOL_TICKS_CLRTCKS
;
14875 ec
->rx_coalesce_usecs_irq
= DEFAULT_RXCOAL_TICK_INT_CLRTCKS
;
14876 ec
->tx_coalesce_usecs
= LOW_TXCOL_TICKS_CLRTCKS
;
14877 ec
->tx_coalesce_usecs_irq
= DEFAULT_TXCOAL_TICK_INT_CLRTCKS
;
14880 if (tg3_flag(tp
, 5705_PLUS
)) {
14881 ec
->rx_coalesce_usecs_irq
= 0;
14882 ec
->tx_coalesce_usecs_irq
= 0;
14883 ec
->stats_block_coalesce_usecs
= 0;
14887 static const struct net_device_ops tg3_netdev_ops
= {
14888 .ndo_open
= tg3_open
,
14889 .ndo_stop
= tg3_close
,
14890 .ndo_start_xmit
= tg3_start_xmit
,
14891 .ndo_get_stats64
= tg3_get_stats64
,
14892 .ndo_validate_addr
= eth_validate_addr
,
14893 .ndo_set_multicast_list
= tg3_set_rx_mode
,
14894 .ndo_set_mac_address
= tg3_set_mac_addr
,
14895 .ndo_do_ioctl
= tg3_ioctl
,
14896 .ndo_tx_timeout
= tg3_tx_timeout
,
14897 .ndo_change_mtu
= tg3_change_mtu
,
14898 .ndo_fix_features
= tg3_fix_features
,
14899 .ndo_set_features
= tg3_set_features
,
14900 #ifdef CONFIG_NET_POLL_CONTROLLER
14901 .ndo_poll_controller
= tg3_poll_controller
,
14905 static int __devinit
tg3_init_one(struct pci_dev
*pdev
,
14906 const struct pci_device_id
*ent
)
14908 struct net_device
*dev
;
14910 int i
, err
, pm_cap
;
14911 u32 sndmbx
, rcvmbx
, intmbx
;
14913 u64 dma_mask
, persist_dma_mask
;
14916 printk_once(KERN_INFO
"%s\n", version
);
14918 err
= pci_enable_device(pdev
);
14920 dev_err(&pdev
->dev
, "Cannot enable PCI device, aborting\n");
14924 err
= pci_request_regions(pdev
, DRV_MODULE_NAME
);
14926 dev_err(&pdev
->dev
, "Cannot obtain PCI resources, aborting\n");
14927 goto err_out_disable_pdev
;
14930 pci_set_master(pdev
);
14932 /* Find power-management capability. */
14933 pm_cap
= pci_find_capability(pdev
, PCI_CAP_ID_PM
);
14935 dev_err(&pdev
->dev
,
14936 "Cannot find Power Management capability, aborting\n");
14938 goto err_out_free_res
;
14941 dev
= alloc_etherdev_mq(sizeof(*tp
), TG3_IRQ_MAX_VECS
);
14943 dev_err(&pdev
->dev
, "Etherdev alloc failed, aborting\n");
14945 goto err_out_free_res
;
14948 SET_NETDEV_DEV(dev
, &pdev
->dev
);
14950 tp
= netdev_priv(dev
);
14953 tp
->pm_cap
= pm_cap
;
14954 tp
->rx_mode
= TG3_DEF_RX_MODE
;
14955 tp
->tx_mode
= TG3_DEF_TX_MODE
;
14958 tp
->msg_enable
= tg3_debug
;
14960 tp
->msg_enable
= TG3_DEF_MSG_ENABLE
;
14962 /* The word/byte swap controls here control register access byte
14963 * swapping. DMA data byte swapping is controlled in the GRC_MODE
14966 tp
->misc_host_ctrl
=
14967 MISC_HOST_CTRL_MASK_PCI_INT
|
14968 MISC_HOST_CTRL_WORD_SWAP
|
14969 MISC_HOST_CTRL_INDIR_ACCESS
|
14970 MISC_HOST_CTRL_PCISTATE_RW
;
14972 /* The NONFRM (non-frame) byte/word swap controls take effect
14973 * on descriptor entries, anything which isn't packet data.
14975 * The StrongARM chips on the board (one for tx, one for rx)
14976 * are running in big-endian mode.
14978 tp
->grc_mode
= (GRC_MODE_WSWAP_DATA
| GRC_MODE_BSWAP_DATA
|
14979 GRC_MODE_WSWAP_NONFRM_DATA
);
14980 #ifdef __BIG_ENDIAN
14981 tp
->grc_mode
|= GRC_MODE_BSWAP_NONFRM_DATA
;
14983 spin_lock_init(&tp
->lock
);
14984 spin_lock_init(&tp
->indirect_lock
);
14985 INIT_WORK(&tp
->reset_task
, tg3_reset_task
);
14987 tp
->regs
= pci_ioremap_bar(pdev
, BAR_0
);
14989 dev_err(&pdev
->dev
, "Cannot map device registers, aborting\n");
14991 goto err_out_free_dev
;
14994 tp
->rx_pending
= TG3_DEF_RX_RING_PENDING
;
14995 tp
->rx_jumbo_pending
= TG3_DEF_RX_JUMBO_RING_PENDING
;
14997 dev
->ethtool_ops
= &tg3_ethtool_ops
;
14998 dev
->watchdog_timeo
= TG3_TX_TIMEOUT
;
14999 dev
->netdev_ops
= &tg3_netdev_ops
;
15000 dev
->irq
= pdev
->irq
;
15002 err
= tg3_get_invariants(tp
);
15004 dev_err(&pdev
->dev
,
15005 "Problem fetching invariants of chip, aborting\n");
15006 goto err_out_iounmap
;
15009 /* The EPB bridge inside 5714, 5715, and 5780 and any
15010 * device behind the EPB cannot support DMA addresses > 40-bit.
15011 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15012 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15013 * do DMA address check in tg3_start_xmit().
15015 if (tg3_flag(tp
, IS_5788
))
15016 persist_dma_mask
= dma_mask
= DMA_BIT_MASK(32);
15017 else if (tg3_flag(tp
, 40BIT_DMA_BUG
)) {
15018 persist_dma_mask
= dma_mask
= DMA_BIT_MASK(40);
15019 #ifdef CONFIG_HIGHMEM
15020 dma_mask
= DMA_BIT_MASK(64);
15023 persist_dma_mask
= dma_mask
= DMA_BIT_MASK(64);
15025 /* Configure DMA attributes. */
15026 if (dma_mask
> DMA_BIT_MASK(32)) {
15027 err
= pci_set_dma_mask(pdev
, dma_mask
);
15029 features
|= NETIF_F_HIGHDMA
;
15030 err
= pci_set_consistent_dma_mask(pdev
,
15033 dev_err(&pdev
->dev
, "Unable to obtain 64 bit "
15034 "DMA for consistent allocations\n");
15035 goto err_out_iounmap
;
15039 if (err
|| dma_mask
== DMA_BIT_MASK(32)) {
15040 err
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(32));
15042 dev_err(&pdev
->dev
,
15043 "No usable DMA configuration, aborting\n");
15044 goto err_out_iounmap
;
15048 tg3_init_bufmgr_config(tp
);
15050 features
|= NETIF_F_HW_VLAN_TX
| NETIF_F_HW_VLAN_RX
;
15052 /* 5700 B0 chips do not support checksumming correctly due
15053 * to hardware bugs.
15055 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5700_B0
) {
15056 features
|= NETIF_F_SG
| NETIF_F_IP_CSUM
| NETIF_F_RXCSUM
;
15058 if (tg3_flag(tp
, 5755_PLUS
))
15059 features
|= NETIF_F_IPV6_CSUM
;
15062 /* TSO is on by default on chips that support hardware TSO.
15063 * Firmware TSO on older chips gives lower performance, so it
15064 * is off by default, but can be enabled using ethtool.
15066 if ((tg3_flag(tp
, HW_TSO_1
) ||
15067 tg3_flag(tp
, HW_TSO_2
) ||
15068 tg3_flag(tp
, HW_TSO_3
)) &&
15069 (features
& NETIF_F_IP_CSUM
))
15070 features
|= NETIF_F_TSO
;
15071 if (tg3_flag(tp
, HW_TSO_2
) || tg3_flag(tp
, HW_TSO_3
)) {
15072 if (features
& NETIF_F_IPV6_CSUM
)
15073 features
|= NETIF_F_TSO6
;
15074 if (tg3_flag(tp
, HW_TSO_3
) ||
15075 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
||
15076 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
&&
15077 GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5784_AX
) ||
15078 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
||
15079 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
)
15080 features
|= NETIF_F_TSO_ECN
;
15084 * Add loopback capability only for a subset of devices that support
15085 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
15086 * loopback for the remaining devices.
15088 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5780
&&
15089 !tg3_flag(tp
, CPMU_PRESENT
))
15090 /* Add the loopback capability */
15091 features
|= NETIF_F_LOOPBACK
;
15093 dev
->features
|= features
;
15094 dev
->hw_features
|= features
;
15095 dev
->vlan_features
|= features
;
15097 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A1
&&
15098 !tg3_flag(tp
, TSO_CAPABLE
) &&
15099 !(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
)) {
15100 tg3_flag_set(tp
, MAX_RXPEND_64
);
15101 tp
->rx_pending
= 63;
15104 err
= tg3_get_device_address(tp
);
15106 dev_err(&pdev
->dev
,
15107 "Could not obtain valid ethernet address, aborting\n");
15108 goto err_out_iounmap
;
15111 if (tg3_flag(tp
, ENABLE_APE
)) {
15112 tp
->aperegs
= pci_ioremap_bar(pdev
, BAR_2
);
15113 if (!tp
->aperegs
) {
15114 dev_err(&pdev
->dev
,
15115 "Cannot map APE registers, aborting\n");
15117 goto err_out_iounmap
;
15120 tg3_ape_lock_init(tp
);
15122 if (tg3_flag(tp
, ENABLE_ASF
))
15123 tg3_read_dash_ver(tp
);
15127 * Reset chip in case UNDI or EFI driver did not shutdown
15128 * DMA self test will enable WDMAC and we'll see (spurious)
15129 * pending DMA on the PCI bus at that point.
15131 if ((tr32(HOSTCC_MODE
) & HOSTCC_MODE_ENABLE
) ||
15132 (tr32(WDMAC_MODE
) & WDMAC_MODE_ENABLE
)) {
15133 tw32(MEMARB_MODE
, MEMARB_MODE_ENABLE
);
15134 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
15137 err
= tg3_test_dma(tp
);
15139 dev_err(&pdev
->dev
, "DMA engine test failed, aborting\n");
15140 goto err_out_apeunmap
;
15143 intmbx
= MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
;
15144 rcvmbx
= MAILBOX_RCVRET_CON_IDX_0
+ TG3_64BIT_REG_LOW
;
15145 sndmbx
= MAILBOX_SNDHOST_PROD_IDX_0
+ TG3_64BIT_REG_LOW
;
15146 for (i
= 0; i
< tp
->irq_max
; i
++) {
15147 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
15150 tnapi
->tx_pending
= TG3_DEF_TX_RING_PENDING
;
15152 tnapi
->int_mbox
= intmbx
;
15158 tnapi
->consmbox
= rcvmbx
;
15159 tnapi
->prodmbox
= sndmbx
;
15162 tnapi
->coal_now
= HOSTCC_MODE_COAL_VEC1_NOW
<< (i
- 1);
15164 tnapi
->coal_now
= HOSTCC_MODE_NOW
;
15166 if (!tg3_flag(tp
, SUPPORT_MSIX
))
15170 * If we support MSIX, we'll be using RSS. If we're using
15171 * RSS, the first vector only handles link interrupts and the
15172 * remaining vectors handle rx and tx interrupts. Reuse the
15173 * mailbox values for the next iteration. The values we setup
15174 * above are still useful for the single vectored mode.
15189 pci_set_drvdata(pdev
, dev
);
15191 err
= register_netdev(dev
);
15193 dev_err(&pdev
->dev
, "Cannot register net device, aborting\n");
15194 goto err_out_apeunmap
;
15197 netdev_info(dev
, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15198 tp
->board_part_number
,
15199 tp
->pci_chip_rev_id
,
15200 tg3_bus_string(tp
, str
),
15203 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) {
15204 struct phy_device
*phydev
;
15205 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
15207 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15208 phydev
->drv
->name
, dev_name(&phydev
->dev
));
15212 if (tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)
15213 ethtype
= "10/100Base-TX";
15214 else if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)
15215 ethtype
= "1000Base-SX";
15217 ethtype
= "10/100/1000Base-T";
15219 netdev_info(dev
, "attached PHY is %s (%s Ethernet) "
15220 "(WireSpeed[%d], EEE[%d])\n",
15221 tg3_phy_string(tp
), ethtype
,
15222 (tp
->phy_flags
& TG3_PHYFLG_NO_ETH_WIRE_SPEED
) == 0,
15223 (tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
) != 0);
15226 netdev_info(dev
, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15227 (dev
->features
& NETIF_F_RXCSUM
) != 0,
15228 tg3_flag(tp
, USE_LINKCHG_REG
) != 0,
15229 (tp
->phy_flags
& TG3_PHYFLG_USE_MI_INTERRUPT
) != 0,
15230 tg3_flag(tp
, ENABLE_ASF
) != 0,
15231 tg3_flag(tp
, TSO_CAPABLE
) != 0);
15232 netdev_info(dev
, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15234 pdev
->dma_mask
== DMA_BIT_MASK(32) ? 32 :
15235 ((u64
)pdev
->dma_mask
) == DMA_BIT_MASK(40) ? 40 : 64);
15237 pci_save_state(pdev
);
15243 iounmap(tp
->aperegs
);
15244 tp
->aperegs
= NULL
;
15257 pci_release_regions(pdev
);
15259 err_out_disable_pdev
:
15260 pci_disable_device(pdev
);
15261 pci_set_drvdata(pdev
, NULL
);
15265 static void __devexit
tg3_remove_one(struct pci_dev
*pdev
)
15267 struct net_device
*dev
= pci_get_drvdata(pdev
);
15270 struct tg3
*tp
= netdev_priv(dev
);
15273 release_firmware(tp
->fw
);
15275 cancel_work_sync(&tp
->reset_task
);
15277 if (!tg3_flag(tp
, USE_PHYLIB
)) {
15282 unregister_netdev(dev
);
15284 iounmap(tp
->aperegs
);
15285 tp
->aperegs
= NULL
;
15292 pci_release_regions(pdev
);
15293 pci_disable_device(pdev
);
15294 pci_set_drvdata(pdev
, NULL
);
15298 #ifdef CONFIG_PM_SLEEP
15299 static int tg3_suspend(struct device
*device
)
15301 struct pci_dev
*pdev
= to_pci_dev(device
);
15302 struct net_device
*dev
= pci_get_drvdata(pdev
);
15303 struct tg3
*tp
= netdev_priv(dev
);
15306 if (!netif_running(dev
))
15309 flush_work_sync(&tp
->reset_task
);
15311 tg3_netif_stop(tp
);
15313 del_timer_sync(&tp
->timer
);
15315 tg3_full_lock(tp
, 1);
15316 tg3_disable_ints(tp
);
15317 tg3_full_unlock(tp
);
15319 netif_device_detach(dev
);
15321 tg3_full_lock(tp
, 0);
15322 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
15323 tg3_flag_clear(tp
, INIT_COMPLETE
);
15324 tg3_full_unlock(tp
);
15326 err
= tg3_power_down_prepare(tp
);
15330 tg3_full_lock(tp
, 0);
15332 tg3_flag_set(tp
, INIT_COMPLETE
);
15333 err2
= tg3_restart_hw(tp
, 1);
15337 tp
->timer
.expires
= jiffies
+ tp
->timer_offset
;
15338 add_timer(&tp
->timer
);
15340 netif_device_attach(dev
);
15341 tg3_netif_start(tp
);
15344 tg3_full_unlock(tp
);
15353 static int tg3_resume(struct device
*device
)
15355 struct pci_dev
*pdev
= to_pci_dev(device
);
15356 struct net_device
*dev
= pci_get_drvdata(pdev
);
15357 struct tg3
*tp
= netdev_priv(dev
);
15360 if (!netif_running(dev
))
15363 netif_device_attach(dev
);
15365 tg3_full_lock(tp
, 0);
15367 tg3_flag_set(tp
, INIT_COMPLETE
);
15368 err
= tg3_restart_hw(tp
, 1);
15372 tp
->timer
.expires
= jiffies
+ tp
->timer_offset
;
15373 add_timer(&tp
->timer
);
15375 tg3_netif_start(tp
);
15378 tg3_full_unlock(tp
);
15386 static SIMPLE_DEV_PM_OPS(tg3_pm_ops
, tg3_suspend
, tg3_resume
);
15387 #define TG3_PM_OPS (&tg3_pm_ops)
15391 #define TG3_PM_OPS NULL
15393 #endif /* CONFIG_PM_SLEEP */
15396 * tg3_io_error_detected - called when PCI error is detected
15397 * @pdev: Pointer to PCI device
15398 * @state: The current pci connection state
15400 * This function is called after a PCI bus error affecting
15401 * this device has been detected.
15403 static pci_ers_result_t
tg3_io_error_detected(struct pci_dev
*pdev
,
15404 pci_channel_state_t state
)
15406 struct net_device
*netdev
= pci_get_drvdata(pdev
);
15407 struct tg3
*tp
= netdev_priv(netdev
);
15408 pci_ers_result_t err
= PCI_ERS_RESULT_NEED_RESET
;
15410 netdev_info(netdev
, "PCI I/O error detected\n");
15414 if (!netif_running(netdev
))
15419 tg3_netif_stop(tp
);
15421 del_timer_sync(&tp
->timer
);
15422 tg3_flag_clear(tp
, RESTART_TIMER
);
15424 /* Want to make sure that the reset task doesn't run */
15425 cancel_work_sync(&tp
->reset_task
);
15426 tg3_flag_clear(tp
, TX_RECOVERY_PENDING
);
15427 tg3_flag_clear(tp
, RESTART_TIMER
);
15429 netif_device_detach(netdev
);
15431 /* Clean up software state, even if MMIO is blocked */
15432 tg3_full_lock(tp
, 0);
15433 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 0);
15434 tg3_full_unlock(tp
);
15437 if (state
== pci_channel_io_perm_failure
)
15438 err
= PCI_ERS_RESULT_DISCONNECT
;
15440 pci_disable_device(pdev
);
15448 * tg3_io_slot_reset - called after the pci bus has been reset.
15449 * @pdev: Pointer to PCI device
15451 * Restart the card from scratch, as if from a cold-boot.
15452 * At this point, the card has exprienced a hard reset,
15453 * followed by fixups by BIOS, and has its config space
15454 * set up identically to what it was at cold boot.
15456 static pci_ers_result_t
tg3_io_slot_reset(struct pci_dev
*pdev
)
15458 struct net_device
*netdev
= pci_get_drvdata(pdev
);
15459 struct tg3
*tp
= netdev_priv(netdev
);
15460 pci_ers_result_t rc
= PCI_ERS_RESULT_DISCONNECT
;
15465 if (pci_enable_device(pdev
)) {
15466 netdev_err(netdev
, "Cannot re-enable PCI device after reset.\n");
15470 pci_set_master(pdev
);
15471 pci_restore_state(pdev
);
15472 pci_save_state(pdev
);
15474 if (!netif_running(netdev
)) {
15475 rc
= PCI_ERS_RESULT_RECOVERED
;
15479 err
= tg3_power_up(tp
);
15481 netdev_err(netdev
, "Failed to restore register access.\n");
15485 rc
= PCI_ERS_RESULT_RECOVERED
;
15494 * tg3_io_resume - called when traffic can start flowing again.
15495 * @pdev: Pointer to PCI device
15497 * This callback is called when the error recovery driver tells
15498 * us that its OK to resume normal operation.
15500 static void tg3_io_resume(struct pci_dev
*pdev
)
15502 struct net_device
*netdev
= pci_get_drvdata(pdev
);
15503 struct tg3
*tp
= netdev_priv(netdev
);
15508 if (!netif_running(netdev
))
15511 tg3_full_lock(tp
, 0);
15512 tg3_flag_set(tp
, INIT_COMPLETE
);
15513 err
= tg3_restart_hw(tp
, 1);
15514 tg3_full_unlock(tp
);
15516 netdev_err(netdev
, "Cannot restart hardware after reset.\n");
15520 netif_device_attach(netdev
);
15522 tp
->timer
.expires
= jiffies
+ tp
->timer_offset
;
15523 add_timer(&tp
->timer
);
15525 tg3_netif_start(tp
);
15533 static struct pci_error_handlers tg3_err_handler
= {
15534 .error_detected
= tg3_io_error_detected
,
15535 .slot_reset
= tg3_io_slot_reset
,
15536 .resume
= tg3_io_resume
15539 static struct pci_driver tg3_driver
= {
15540 .name
= DRV_MODULE_NAME
,
15541 .id_table
= tg3_pci_tbl
,
15542 .probe
= tg3_init_one
,
15543 .remove
= __devexit_p(tg3_remove_one
),
15544 .err_handler
= &tg3_err_handler
,
15545 .driver
.pm
= TG3_PM_OPS
,
15548 static int __init
tg3_init(void)
15550 return pci_register_driver(&tg3_driver
);
15553 static void __exit
tg3_cleanup(void)
15555 pci_unregister_driver(&tg3_driver
);
15558 module_init(tg3_init
);
15559 module_exit(tg3_cleanup
);