/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>
#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag) \
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag) \
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag) \
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
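/* Illustrative expansion of the ## token pasting above: a call such as
 * tg3_flag(tp, ENABLE_APE) becomes
 * test_bit(TG3_FLAG_ENABLE_APE, tp->tg3_flags), and the set/clear
 * variants map onto the kernel's atomic set_bit()/clear_bit() bitops,
 * so each flag lives in one bit of the tg3_flags bitmap.
 */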
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		119
#define DRV_MODULE_VERSION \
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 18, 2011"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
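/* Example of the shift-and-mask point made above: because
 * TG3_TX_RING_SIZE is a compile-time power of two (512), the ring
 * advance needs no divide; NEXT_TX(511) == (512 & 511) == 0, wrapping
 * cleanly back to the start of the ring.
 */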
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif
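/* In other words: when unaligned loads are cheap (NET_IP_ALIGN == 0 or
 * the arch advertises efficient unaligned access), the copy threshold
 * folds to a compile-time constant; otherwise it is read from the
 * per-device rx_copy_thresh field so the 5701 workaround can raise it
 * at runtime.
 */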
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
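/* The indirect access above is a two-register window protocol: the
 * target register offset is latched through the TG3PCI_REG_BASE_ADDR
 * config-space register and the data then moves through
 * TG3PCI_REG_DATA.  The spinlock must cover both config accesses so
 * concurrent callers cannot interleave their address/data pairs.
 */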
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
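/* Rationale for the read-back: MMIO writes via writel() may be posted
 * (buffered) by the PCI bridge, so the read32() forces the write to
 * actually reach the device before the second udelay(); the
 * caller-specified settle time is therefore honored on both paths.
 */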
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
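/* Note these accessors assume a local variable named tp is in scope at
 * every call site; e.g. tw32_f(MAC_MODE, tp->mac_mode) expands to
 * _tw32_flush(tp, MAC_MODE, tp->mac_mode, 0), and tw32()/tr32() go
 * through the function pointers installed for the detected chip, so
 * the posted vs. indirect register method is chosen once at probe time.
 */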
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't hold any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}
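/* Acquire/release pattern (illustrative sketch): callers take the
 * hardware semaphore before touching state shared with the APE
 * management firmware and always release it afterwards, e.g.
 *
 *	if (!tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) {
 *		... access shared memory ...
 *		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *	}
 *
 * The grant register doubles as the release path: writing the driver's
 * grant bit back (as tg3_ape_unlock() below does) revokes the claim.
 */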
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
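/* Mailbox protocol used above: writing 0x00000001 to a vector's
 * int_mbox masks that vector, while writing last_tag << 24 (low bit
 * clear) re-enables it and acknowledges status-block work up to that
 * tag; the duplicate write under 1SHOT_MSI presumably re-arms the
 * one-shot MSI mode.
 */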
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
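/* MI_COM frame layout, as encoded above: the PHY address and register
 * number are shifted into their MI_COM_*_SHIFT fields, the command bits
 * select a read, and MI_COM_START kicks the MAC's MDIO state machine.
 * The loop then polls MI_COM_BUSY until the serial transaction
 * completes, after which the result is masked out of MI_COM_DATA_MASK.
 * Auto-polling is suspended around the transaction so the MAC does not
 * issue competing MDIO traffic.
 */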
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
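/* These two helpers follow the usual clause-45-over-clause-22 MMD
 * indirection: the MMD device address and register address are staged
 * through the MMD_CTRL/MMD_ADDRESS register pair, then the same
 * MMD_ADDRESS register carries the data once DATA_NOINC mode is
 * selected.  A typical use appears later in this file:
 * tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val).
 */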
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
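/* BMCR_RESET is self-clearing per IEEE 802.3 clause 22: the PHY clears
 * the bit when its internal reset finishes, so the poll above bounds
 * the wait at roughly 5000 iterations * udelay(10) ~= 50 ms before
 * giving up with -EBUSY.
 */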
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
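/* Worked example of the arithmetic above: if 2000 usec of the timeout
 * remain, delay_cnt becomes (2000 >> 3) + 1 == 251 polls of udelay(8),
 * i.e. roughly the 2000 usec that are left; the +1 rounds up so the
 * loop never degenerates to zero iterations.
 */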
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
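/* The nested ifs above restate the IEEE 802.3 pause resolution as a
 * truth table over the local and link-partner (PAUSE, ASM_DIR) bits:
 *
 *	local PAUSE  local ASM  remote PAUSE  remote ASM  =>  result
 *	     1           x           1            x           TX+RX
 *	     1           1           0            1           RX only
 *	     0           1           1            1           TX only
 *	   otherwise                                          none
 */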
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}
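/* tp->phy_otp holds factory trim values packed into a single 32-bit
 * word; each MASK/SHIFT pair above unpacks one field and programs it
 * into the corresponding PHY DSP register.  The whole sequence is
 * bracketed by the SMDSP enable/disable handshake, and the function
 * bails out early if that enable fails.
 */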
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
		if (err)
			return err;

		/* Block the PHY control access. */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
2026 /* This will reset the tigon3 PHY unconditionally; the caller
2027 * decides beforehand whether a reset is actually needed.
2028 */
2029 static int tg3_phy_reset(struct tg3 *tp)
2031 u32 val, cpmuctrl;
2032 int err;
2034 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2035 val = tr32(GRC_MISC_CFG);
2036 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2037 udelay(40);
2039 err = tg3_readphy(tp, MII_BMSR, &val);
2040 err |= tg3_readphy(tp, MII_BMSR, &val);
2041 if (err != 0)
2042 return -EBUSY;
2044 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2045 netif_carrier_off(tp->dev);
2046 tg3_link_report(tp);
2049 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2050 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2051 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2052 err = tg3_phy_reset_5703_4_5(tp);
2053 if (err)
2054 return err;
2055 goto out;
2058 cpmuctrl = 0;
2059 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2060 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2061 cpmuctrl = tr32(TG3_CPMU_CTRL);
2062 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2063 tw32(TG3_CPMU_CTRL,
2064 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2067 err = tg3_bmcr_reset(tp);
2068 if (err)
2069 return err;
2071 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2072 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2073 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2075 tw32(TG3_CPMU_CTRL, cpmuctrl);
2078 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2079 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2080 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2081 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2082 CPMU_LSPD_1000MB_MACCLK_12_5) {
2083 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2084 udelay(40);
2085 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2089 if (tg3_flag(tp, 5717_PLUS) &&
2090 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2091 return 0;
2093 tg3_phy_apply_otp(tp);
2095 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2096 tg3_phy_toggle_apd(tp, true);
2097 else
2098 tg3_phy_toggle_apd(tp, false);
2100 out:
2101 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2102 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2103 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2104 tg3_phydsp_write(tp, 0x000a, 0x0323);
2105 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2108 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2109 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2110 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2113 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2114 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2115 tg3_phydsp_write(tp, 0x000a, 0x310b);
2116 tg3_phydsp_write(tp, 0x201f, 0x9506);
2117 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2118 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2120 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2121 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2122 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2123 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2124 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2125 tg3_writephy(tp, MII_TG3_TEST1,
2126 MII_TG3_TEST1_TRIM_EN | 0x4);
2127 } else
2128 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2130 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2134 /* Set the extended packet length bit (bit 14) on all
2135 * chips that support jumbo frames. */
2136 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2137 /* Cannot do read-modify-write on 5401 */
2138 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2139 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2140 /* Set bit 14 with read-modify-write to preserve other bits */
2141 err = tg3_phy_auxctl_read(tp,
2142 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2143 if (!err)
2144 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2145 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2148 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2149 * jumbo frames transmission.
2150 */
2151 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2152 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2153 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2154 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2157 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2158 /* adjust output voltage */
2159 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2162 tg3_phy_toggle_automdix(tp, 1);
2163 tg3_phy_set_wirespeed(tp);
2164 return 0;
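/* Decide whether the auxiliary supply (Vaux) must stay powered,
 * i.e. WOL or ASF is enabled on this port or on its peer
 * function, and drive the GRC local-control GPIOs to switch the
 * board's power source accordingly.
 */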
2167 static void tg3_frob_aux_power(struct tg3 *tp)
2169 bool need_vaux = false;
2171 /* The GPIOs do something completely different on 57765. */
2172 if (!tg3_flag(tp, IS_NIC) ||
2173 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2174 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2175 return;
2177 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2178 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2179 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2180 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
2181 tp->pdev_peer != tp->pdev) {
2182 struct net_device *dev_peer;
2184 dev_peer = pci_get_drvdata(tp->pdev_peer);
2186 /* remove_one() may have been run on the peer. */
2187 if (dev_peer) {
2188 struct tg3 *tp_peer = netdev_priv(dev_peer);
2190 if (tg3_flag(tp_peer, INIT_COMPLETE))
2191 return;
2193 if (tg3_flag(tp_peer, WOL_ENABLE) ||
2194 tg3_flag(tp_peer, ENABLE_ASF))
2195 need_vaux = true;
2199 if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
2200 need_vaux = true;
2202 if (need_vaux) {
2203 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2204 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2205 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2206 (GRC_LCLCTRL_GPIO_OE0 |
2207 GRC_LCLCTRL_GPIO_OE1 |
2208 GRC_LCLCTRL_GPIO_OE2 |
2209 GRC_LCLCTRL_GPIO_OUTPUT0 |
2210 GRC_LCLCTRL_GPIO_OUTPUT1),
2211 100);
2212 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2213 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2214 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2215 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2216 GRC_LCLCTRL_GPIO_OE1 |
2217 GRC_LCLCTRL_GPIO_OE2 |
2218 GRC_LCLCTRL_GPIO_OUTPUT0 |
2219 GRC_LCLCTRL_GPIO_OUTPUT1 |
2220 tp->grc_local_ctrl;
2221 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2223 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2224 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2226 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2227 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2228 } else {
2229 u32 no_gpio2;
2230 u32 grc_local_ctrl = 0;
2232 /* Workaround to prevent overdrawing Amps. */
2233 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2234 ASIC_REV_5714) {
2235 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2236 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2237 grc_local_ctrl, 100);
2240 /* On 5753 and variants, GPIO2 cannot be used. */
2241 no_gpio2 = tp->nic_sram_data_cfg &
2242 NIC_SRAM_DATA_CFG_NO_GPIO2;
2244 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2245 GRC_LCLCTRL_GPIO_OE1 |
2246 GRC_LCLCTRL_GPIO_OE2 |
2247 GRC_LCLCTRL_GPIO_OUTPUT1 |
2248 GRC_LCLCTRL_GPIO_OUTPUT2;
2249 if (no_gpio2) {
2250 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2251 GRC_LCLCTRL_GPIO_OUTPUT2);
2253 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2254 grc_local_ctrl, 100);
2256 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2258 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2259 grc_local_ctrl, 100);
2261 if (!no_gpio2) {
2262 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2263 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2264 grc_local_ctrl, 100);
2267 } else {
2268 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2269 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2270 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2271 (GRC_LCLCTRL_GPIO_OE1 |
2272 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2274 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2275 GRC_LCLCTRL_GPIO_OE1, 100);
2277 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2278 (GRC_LCLCTRL_GPIO_OE1 |
2279 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
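/* Report whether MAC_MODE_LINK_POLARITY should be set on 5700
 * parts for the given speed; the sense is inverted for BCM5411
 * PHYs and forced on in the PHY_2 LED mode.
 */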
2284 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2286 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2287 return 1;
2288 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2289 if (speed != SPEED_10)
2290 return 1;
2291 } else if (speed == SPEED_10)
2292 return 1;
2294 return 0;
2297 static int tg3_setup_phy(struct tg3 *, int);
2299 #define RESET_KIND_SHUTDOWN 0
2300 #define RESET_KIND_INIT 1
2301 #define RESET_KIND_SUSPEND 2
2303 static void tg3_write_sig_post_reset(struct tg3 *, int);
2304 static int tg3_halt_cpu(struct tg3 *, u32);
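/* Drop the PHY into its lowest-power state for the current
 * configuration: SG_DIG soft reset for 5704 serdes, EPHY IDDQ
 * on 5906, the shadow standby-power-down bit on FET PHYs, and
 * plain BMCR_PDOWN otherwise -- except on chips whose bugs
 * forbid powering the PHY down at all.
 */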
2306 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2308 u32 val;
2310 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2311 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2312 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2313 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2315 sg_dig_ctrl |=
2316 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2317 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2318 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2320 return;
2323 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2324 tg3_bmcr_reset(tp);
2325 val = tr32(GRC_MISC_CFG);
2326 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2327 udelay(40);
2328 return;
2329 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2330 u32 phytest;
2331 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2332 u32 phy;
2334 tg3_writephy(tp, MII_ADVERTISE, 0);
2335 tg3_writephy(tp, MII_BMCR,
2336 BMCR_ANENABLE | BMCR_ANRESTART);
2338 tg3_writephy(tp, MII_TG3_FET_TEST,
2339 phytest | MII_TG3_FET_SHADOW_EN);
2340 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2341 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2342 tg3_writephy(tp,
2343 MII_TG3_FET_SHDW_AUXMODE4,
2344 phy);
2346 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2348 return;
2349 } else if (do_low_power) {
2350 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2351 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2353 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2354 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2355 MII_TG3_AUXCTL_PCTL_VREG_11V;
2356 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2359 /* The PHY should not be powered down on some chips because
2360 * of bugs.
2361 */
2362 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2363 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2364 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2365 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2366 return;
2368 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2369 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2370 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2371 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2372 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2373 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2376 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
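/* NVRAM access is arbitrated between the driver and on-chip
 * firmware through NVRAM_SWARB: request with SWARB_REQ_SET1 and
 * spin until SWARB_GNT1 is granted.  The lock nests via
 * tp->nvram_lock_cnt.
 */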
2379 /* tp->lock is held. */
2380 static int tg3_nvram_lock(struct tg3 *tp)
2382 if (tg3_flag(tp, NVRAM)) {
2383 int i;
2385 if (tp->nvram_lock_cnt == 0) {
2386 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2387 for (i = 0; i < 8000; i++) {
2388 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2389 break;
2390 udelay(20);
2392 if (i == 8000) {
2393 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2394 return -ENODEV;
2397 tp->nvram_lock_cnt++;
2399 return 0;
2402 /* tp->lock is held. */
2403 static void tg3_nvram_unlock(struct tg3 *tp)
2405 if (tg3_flag(tp, NVRAM)) {
2406 if (tp->nvram_lock_cnt > 0)
2407 tp->nvram_lock_cnt--;
2408 if (tp->nvram_lock_cnt == 0)
2409 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2413 /* tp->lock is held. */
2414 static void tg3_enable_nvram_access(struct tg3 *tp)
2416 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2417 u32 nvaccess = tr32(NVRAM_ACCESS);
2419 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2423 /* tp->lock is held. */
2424 static void tg3_disable_nvram_access(struct tg3 *tp)
2426 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2427 u32 nvaccess = tr32(NVRAM_ACCESS);
2429 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
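/* Fallback for devices without the NVRAM flag: read one 32-bit
 * word through the legacy GRC_EEPROM_ADDR/GRC_EEPROM_DATA
 * interface, polling up to ~1s for EEPROM_ADDR_COMPLETE.
 */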
2433 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2434 u32 offset, u32 *val)
2436 u32 tmp;
2437 int i;
2439 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2440 return -EINVAL;
2442 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2443 EEPROM_ADDR_DEVID_MASK |
2444 EEPROM_ADDR_READ);
2445 tw32(GRC_EEPROM_ADDR,
2446 tmp |
2447 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2448 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2449 EEPROM_ADDR_ADDR_MASK) |
2450 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2452 for (i = 0; i < 1000; i++) {
2453 tmp = tr32(GRC_EEPROM_ADDR);
2455 if (tmp & EEPROM_ADDR_COMPLETE)
2456 break;
2457 msleep(1);
2459 if (!(tmp & EEPROM_ADDR_COMPLETE))
2460 return -EBUSY;
2462 tmp = tr32(GRC_EEPROM_DATA);
2464 /*
2465 * The data will always be opposite the native endian
2466 * format. Perform a blind byteswap to compensate.
2467 */
2468 *val = swab32(tmp);
2470 return 0;
2473 #define NVRAM_CMD_TIMEOUT 10000
2475 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2477 int i;
2479 tw32(NVRAM_CMD, nvram_cmd);
2480 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2481 udelay(10);
2482 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2483 udelay(10);
2484 break;
2488 if (i == NVRAM_CMD_TIMEOUT)
2489 return -EBUSY;
2491 return 0;
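/* Atmel AT45DB0x1B buffered flash does not use power-of-two
 * pages, so addresses are translated between a linear byte
 * offset and the chip's page/offset form.  For example, assuming
 * the usual 264-byte page size, logical offset 1000 lands in
 * page 3 at byte 208:
 * phys = ((1000 / 264) << ATMEL_AT45DB0X1B_PAGE_POS) + (1000 % 264).
 */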
2494 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2496 if (tg3_flag(tp, NVRAM) &&
2497 tg3_flag(tp, NVRAM_BUFFERED) &&
2498 tg3_flag(tp, FLASH) &&
2499 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2500 (tp->nvram_jedecnum == JEDEC_ATMEL))
2502 addr = ((addr / tp->nvram_pagesize) <<
2503 ATMEL_AT45DB0X1B_PAGE_POS) +
2504 (addr % tp->nvram_pagesize);
2506 return addr;
2509 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2511 if (tg3_flag(tp, NVRAM) &&
2512 tg3_flag(tp, NVRAM_BUFFERED) &&
2513 tg3_flag(tp, FLASH) &&
2514 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2515 (tp->nvram_jedecnum == JEDEC_ATMEL))
2517 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2518 tp->nvram_pagesize) +
2519 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2521 return addr;
2524 /* NOTE: Data read in from NVRAM is byteswapped according to
2525 * the byteswapping settings for all other register accesses.
2526 * tg3 devices are BE devices, so on a BE machine, the data
2527 * returned will be exactly as it is seen in NVRAM. On a LE
2528 * machine, the 32-bit value will be byteswapped.
2529 */
2530 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2532 int ret;
2534 if (!tg3_flag(tp, NVRAM))
2535 return tg3_nvram_read_using_eeprom(tp, offset, val);
2537 offset = tg3_nvram_phys_addr(tp, offset);
2539 if (offset > NVRAM_ADDR_MSK)
2540 return -EINVAL;
2542 ret = tg3_nvram_lock(tp);
2543 if (ret)
2544 return ret;
2546 tg3_enable_nvram_access(tp);
2548 tw32(NVRAM_ADDR, offset);
2549 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2550 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2552 if (ret == 0)
2553 *val = tr32(NVRAM_RDDATA);
2555 tg3_disable_nvram_access(tp);
2557 tg3_nvram_unlock(tp);
2559 return ret;
2562 /* Ensures NVRAM data is in bytestream format. */
2563 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2565 u32 v;
2566 int res = tg3_nvram_read(tp, offset, &v);
2567 if (!res)
2568 *val = cpu_to_be32(v);
2569 return res;
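/* Program the station address into all four MAC_ADDR register
 * pairs (plus twelve extended pairs on 5703/5704), optionally
 * skipping entry 1, and seed the transmit backoff generator
 * from the byte sum of the address.
 */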
2572 /* tp->lock is held. */
2573 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2575 u32 addr_high, addr_low;
2576 int i;
2578 addr_high = ((tp->dev->dev_addr[0] << 8) |
2579 tp->dev->dev_addr[1]);
2580 addr_low = ((tp->dev->dev_addr[2] << 24) |
2581 (tp->dev->dev_addr[3] << 16) |
2582 (tp->dev->dev_addr[4] << 8) |
2583 (tp->dev->dev_addr[5] << 0));
2584 for (i = 0; i < 4; i++) {
2585 if (i == 1 && skip_mac_1)
2586 continue;
2587 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2588 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2591 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2592 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2593 for (i = 0; i < 12; i++) {
2594 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2595 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2599 addr_high = (tp->dev->dev_addr[0] +
2600 tp->dev->dev_addr[1] +
2601 tp->dev->dev_addr[2] +
2602 tp->dev->dev_addr[3] +
2603 tp->dev->dev_addr[4] +
2604 tp->dev->dev_addr[5]) &
2605 TX_BACKOFF_SEED_MASK;
2606 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2609 static void tg3_enable_register_access(struct tg3 *tp)
2611 /*
2612 * Make sure register accesses (indirect or otherwise) will function
2613 * correctly.
2614 */
2615 pci_write_config_dword(tp->pdev,
2616 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2619 static int tg3_power_up(struct tg3 *tp)
2621 tg3_enable_register_access(tp);
2623 pci_set_power_state(tp->pdev, PCI_D0);
2625 /* Switch out of Vaux if it is a NIC */
2626 if (tg3_flag(tp, IS_NIC))
2627 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2629 return 0;
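/* Prepare the chip for D3: mask PCI interrupts, renegotiate the
 * link down to WOL speeds where applicable, arm the WOL/ASF/APE
 * mailboxes and MAC mode bits, gate unused core clocks, then
 * power down the PHY and switch over to auxiliary power.
 */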
2632 static int tg3_power_down_prepare(struct tg3 *tp)
2634 u32 misc_host_ctrl;
2635 bool device_should_wake, do_low_power;
2637 tg3_enable_register_access(tp);
2639 /* Restore the CLKREQ setting. */
2640 if (tg3_flag(tp, CLKREQ_BUG)) {
2641 u16 lnkctl;
2643 pci_read_config_word(tp->pdev,
2644 tp->pcie_cap + PCI_EXP_LNKCTL,
2645 &lnkctl);
2646 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2647 pci_write_config_word(tp->pdev,
2648 tp->pcie_cap + PCI_EXP_LNKCTL,
2649 lnkctl);
2652 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2653 tw32(TG3PCI_MISC_HOST_CTRL,
2654 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2656 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2657 tg3_flag(tp, WOL_ENABLE);
2659 if (tg3_flag(tp, USE_PHYLIB)) {
2660 do_low_power = false;
2661 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2662 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2663 struct phy_device *phydev;
2664 u32 phyid, advertising;
2666 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2668 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2670 tp->link_config.orig_speed = phydev->speed;
2671 tp->link_config.orig_duplex = phydev->duplex;
2672 tp->link_config.orig_autoneg = phydev->autoneg;
2673 tp->link_config.orig_advertising = phydev->advertising;
2675 advertising = ADVERTISED_TP |
2676 ADVERTISED_Pause |
2677 ADVERTISED_Autoneg |
2678 ADVERTISED_10baseT_Half;
2680 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2681 if (tg3_flag(tp, WOL_SPEED_100MB))
2682 advertising |=
2683 ADVERTISED_100baseT_Half |
2684 ADVERTISED_100baseT_Full |
2685 ADVERTISED_10baseT_Full;
2686 else
2687 advertising |= ADVERTISED_10baseT_Full;
2690 phydev->advertising = advertising;
2692 phy_start_aneg(phydev);
2694 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2695 if (phyid != PHY_ID_BCMAC131) {
2696 phyid &= PHY_BCM_OUI_MASK;
2697 if (phyid == PHY_BCM_OUI_1 ||
2698 phyid == PHY_BCM_OUI_2 ||
2699 phyid == PHY_BCM_OUI_3)
2700 do_low_power = true;
2703 } else {
2704 do_low_power = true;
2706 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2707 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2708 tp->link_config.orig_speed = tp->link_config.speed;
2709 tp->link_config.orig_duplex = tp->link_config.duplex;
2710 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2713 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2714 tp->link_config.speed = SPEED_10;
2715 tp->link_config.duplex = DUPLEX_HALF;
2716 tp->link_config.autoneg = AUTONEG_ENABLE;
2717 tg3_setup_phy(tp, 0);
2721 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2722 u32 val;
2724 val = tr32(GRC_VCPU_EXT_CTRL);
2725 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2726 } else if (!tg3_flag(tp, ENABLE_ASF)) {
2727 int i;
2728 u32 val;
2730 for (i = 0; i < 200; i++) {
2731 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2732 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2733 break;
2734 msleep(1);
2737 if (tg3_flag(tp, WOL_CAP))
2738 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2739 WOL_DRV_STATE_SHUTDOWN |
2740 WOL_DRV_WOL |
2741 WOL_SET_MAGIC_PKT);
2743 if (device_should_wake) {
2744 u32 mac_mode;
2746 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2747 if (do_low_power &&
2748 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2749 tg3_phy_auxctl_write(tp,
2750 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2751 MII_TG3_AUXCTL_PCTL_WOL_EN |
2752 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2753 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2754 udelay(40);
2757 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2758 mac_mode = MAC_MODE_PORT_MODE_GMII;
2759 else
2760 mac_mode = MAC_MODE_PORT_MODE_MII;
2762 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2763 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2764 ASIC_REV_5700) {
2765 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2766 SPEED_100 : SPEED_10;
2767 if (tg3_5700_link_polarity(tp, speed))
2768 mac_mode |= MAC_MODE_LINK_POLARITY;
2769 else
2770 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2772 } else {
2773 mac_mode = MAC_MODE_PORT_MODE_TBI;
2776 if (!tg3_flag(tp, 5750_PLUS))
2777 tw32(MAC_LED_CTRL, tp->led_ctrl);
2779 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2780 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2781 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2782 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2784 if (tg3_flag(tp, ENABLE_APE))
2785 mac_mode |= MAC_MODE_APE_TX_EN |
2786 MAC_MODE_APE_RX_EN |
2787 MAC_MODE_TDE_ENABLE;
2789 tw32_f(MAC_MODE, mac_mode);
2790 udelay(100);
2792 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2793 udelay(10);
2796 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2797 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2798 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2799 u32 base_val;
2801 base_val = tp->pci_clock_ctrl;
2802 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2803 CLOCK_CTRL_TXCLK_DISABLE);
2805 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2806 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2807 } else if (tg3_flag(tp, 5780_CLASS) ||
2808 tg3_flag(tp, CPMU_PRESENT) ||
2809 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2810 /* do nothing */
2811 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2812 u32 newbits1, newbits2;
2814 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2815 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2816 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2817 CLOCK_CTRL_TXCLK_DISABLE |
2818 CLOCK_CTRL_ALTCLK);
2819 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2820 } else if (tg3_flag(tp, 5705_PLUS)) {
2821 newbits1 = CLOCK_CTRL_625_CORE;
2822 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2823 } else {
2824 newbits1 = CLOCK_CTRL_ALTCLK;
2825 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2828 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2829 40);
2831 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2832 40);
2834 if (!tg3_flag(tp, 5705_PLUS)) {
2835 u32 newbits3;
2837 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2838 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2839 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2840 CLOCK_CTRL_TXCLK_DISABLE |
2841 CLOCK_CTRL_44MHZ_CORE);
2842 } else {
2843 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2846 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2847 tp->pci_clock_ctrl | newbits3, 40);
2851 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
2852 tg3_power_down_phy(tp, do_low_power);
2854 tg3_frob_aux_power(tp);
2856 /* Workaround for unstable PLL clock */
2857 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2858 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2859 u32 val = tr32(0x7d00);
2861 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2862 tw32(0x7d00, val);
2863 if (!tg3_flag(tp, ENABLE_ASF)) {
2864 int err;
2866 err = tg3_nvram_lock(tp);
2867 tg3_halt_cpu(tp, RX_CPU_BASE);
2868 if (!err)
2869 tg3_nvram_unlock(tp);
2873 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2875 return 0;
2878 static void tg3_power_down(struct tg3 *tp)
2880 tg3_power_down_prepare(tp);
2882 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
2883 pci_set_power_state(tp->pdev, PCI_D3hot);
2886 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2888 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2889 case MII_TG3_AUX_STAT_10HALF:
2890 *speed = SPEED_10;
2891 *duplex = DUPLEX_HALF;
2892 break;
2894 case MII_TG3_AUX_STAT_10FULL:
2895 *speed = SPEED_10;
2896 *duplex = DUPLEX_FULL;
2897 break;
2899 case MII_TG3_AUX_STAT_100HALF:
2900 *speed = SPEED_100;
2901 *duplex = DUPLEX_HALF;
2902 break;
2904 case MII_TG3_AUX_STAT_100FULL:
2905 *speed = SPEED_100;
2906 *duplex = DUPLEX_FULL;
2907 break;
2909 case MII_TG3_AUX_STAT_1000HALF:
2910 *speed = SPEED_1000;
2911 *duplex = DUPLEX_HALF;
2912 break;
2914 case MII_TG3_AUX_STAT_1000FULL:
2915 *speed = SPEED_1000;
2916 *duplex = DUPLEX_FULL;
2917 break;
2919 default:
2920 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2921 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2922 SPEED_10;
2923 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2924 DUPLEX_HALF;
2925 break;
2927 *speed = SPEED_INVALID;
2928 *duplex = DUPLEX_INVALID;
2929 break;
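/* Translate ethtool-style ADVERTISED_* bits plus a flow-control
 * mask into MII_ADVERTISE/MII_TG3_CTRL writes, and program the
 * clause-45 EEE advertisement when the PHY supports it.
 */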
2933 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
2935 int err = 0;
2936 u32 val, new_adv;
2938 new_adv = ADVERTISE_CSMA;
2939 if (advertise & ADVERTISED_10baseT_Half)
2940 new_adv |= ADVERTISE_10HALF;
2941 if (advertise & ADVERTISED_10baseT_Full)
2942 new_adv |= ADVERTISE_10FULL;
2943 if (advertise & ADVERTISED_100baseT_Half)
2944 new_adv |= ADVERTISE_100HALF;
2945 if (advertise & ADVERTISED_100baseT_Full)
2946 new_adv |= ADVERTISE_100FULL;
2948 new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
2950 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
2951 if (err)
2952 goto done;
2954 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2955 goto done;
2957 new_adv = 0;
2958 if (advertise & ADVERTISED_1000baseT_Half)
2959 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2960 if (advertise & ADVERTISED_1000baseT_Full)
2961 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2963 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2964 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2965 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2966 MII_TG3_CTRL_ENABLE_AS_MASTER);
2968 err = tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2969 if (err)
2970 goto done;
2972 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2973 goto done;
2975 tw32(TG3_CPMU_EEE_MODE,
2976 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2978 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2979 if (!err) {
2980 u32 err2;
2982 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
2983 case ASIC_REV_5717:
2984 case ASIC_REV_57765:
2985 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
2986 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
2987 MII_TG3_DSP_CH34TP2_HIBW01);
2988 /* Fall through */
2989 case ASIC_REV_5719:
2990 val = MII_TG3_DSP_TAP26_ALNOKO |
2991 MII_TG3_DSP_TAP26_RMRXSTO |
2992 MII_TG3_DSP_TAP26_OPCSINPT;
2993 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2996 val = 0;
2997 /* Advertise 100-BaseTX EEE ability */
2998 if (advertise & ADVERTISED_100baseT_Full)
2999 val |= MDIO_AN_EEE_ADV_100TX;
3000 /* Advertise 1000-BaseT EEE ability */
3001 if (advertise & ADVERTISED_1000baseT_Full)
3002 val |= MDIO_AN_EEE_ADV_1000T;
3003 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3005 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3006 if (!err)
3007 err = err2;
3010 done:
3011 return err;
3014 static void tg3_phy_copper_begin(struct tg3 *tp)
3016 u32 new_adv;
3017 int i;
3019 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3020 new_adv = ADVERTISED_10baseT_Half |
3021 ADVERTISED_10baseT_Full;
3022 if (tg3_flag(tp, WOL_SPEED_100MB))
3023 new_adv |= ADVERTISED_100baseT_Half |
3024 ADVERTISED_100baseT_Full;
3026 tg3_phy_autoneg_cfg(tp, new_adv,
3027 FLOW_CTRL_TX | FLOW_CTRL_RX);
3028 } else if (tp->link_config.speed == SPEED_INVALID) {
3029 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3030 tp->link_config.advertising &=
3031 ~(ADVERTISED_1000baseT_Half |
3032 ADVERTISED_1000baseT_Full);
3034 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3035 tp->link_config.flowctrl);
3036 } else {
3037 /* Asking for a specific link mode. */
3038 if (tp->link_config.speed == SPEED_1000) {
3039 if (tp->link_config.duplex == DUPLEX_FULL)
3040 new_adv = ADVERTISED_1000baseT_Full;
3041 else
3042 new_adv = ADVERTISED_1000baseT_Half;
3043 } else if (tp->link_config.speed == SPEED_100) {
3044 if (tp->link_config.duplex == DUPLEX_FULL)
3045 new_adv = ADVERTISED_100baseT_Full;
3046 else
3047 new_adv = ADVERTISED_100baseT_Half;
3048 } else {
3049 if (tp->link_config.duplex == DUPLEX_FULL)
3050 new_adv = ADVERTISED_10baseT_Full;
3051 else
3052 new_adv = ADVERTISED_10baseT_Half;
3055 tg3_phy_autoneg_cfg(tp, new_adv,
3056 tp->link_config.flowctrl);
3059 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3060 tp->link_config.speed != SPEED_INVALID) {
3061 u32 bmcr, orig_bmcr;
3063 tp->link_config.active_speed = tp->link_config.speed;
3064 tp->link_config.active_duplex = tp->link_config.duplex;
3066 bmcr = 0;
3067 switch (tp->link_config.speed) {
3068 default:
3069 case SPEED_10:
3070 break;
3072 case SPEED_100:
3073 bmcr |= BMCR_SPEED100;
3074 break;
3076 case SPEED_1000:
3077 bmcr |= TG3_BMCR_SPEED1000;
3078 break;
3081 if (tp->link_config.duplex == DUPLEX_FULL)
3082 bmcr |= BMCR_FULLDPLX;
3084 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3085 (bmcr != orig_bmcr)) {
3086 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3087 for (i = 0; i < 1500; i++) {
3088 u32 tmp;
3090 udelay(10);
3091 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3092 tg3_readphy(tp, MII_BMSR, &tmp))
3093 continue;
3094 if (!(tmp & BMSR_LSTATUS)) {
3095 udelay(40);
3096 break;
3099 tg3_writephy(tp, MII_BMCR, bmcr);
3100 udelay(40);
3102 } else {
3103 tg3_writephy(tp, MII_BMCR,
3104 BMCR_ANENABLE | BMCR_ANRESTART);
3108 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3110 int err;
3112 /* Turn off tap power management and set the
3113 * extended packet length bit. */
3114 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3116 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3117 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3118 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3119 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3120 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3122 udelay(40);
3124 return err;
3127 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3129 u32 adv_reg, all_mask = 0;
3131 if (mask & ADVERTISED_10baseT_Half)
3132 all_mask |= ADVERTISE_10HALF;
3133 if (mask & ADVERTISED_10baseT_Full)
3134 all_mask |= ADVERTISE_10FULL;
3135 if (mask & ADVERTISED_100baseT_Half)
3136 all_mask |= ADVERTISE_100HALF;
3137 if (mask & ADVERTISED_100baseT_Full)
3138 all_mask |= ADVERTISE_100FULL;
3140 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3141 return 0;
3143 if ((adv_reg & all_mask) != all_mask)
3144 return 0;
3145 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3146 u32 tg3_ctrl;
3148 all_mask = 0;
3149 if (mask & ADVERTISED_1000baseT_Half)
3150 all_mask |= ADVERTISE_1000HALF;
3151 if (mask & ADVERTISED_1000baseT_Full)
3152 all_mask |= ADVERTISE_1000FULL;
3154 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3155 return 0;
3157 if ((tg3_ctrl & all_mask) != all_mask)
3158 return 0;
3160 return 1;
3163 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3165 u32 curadv, reqadv;
3167 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3168 return 1;
3170 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3171 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3173 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3174 if (curadv != reqadv)
3175 return 0;
3177 if (tg3_flag(tp, PAUSE_AUTONEG))
3178 tg3_readphy(tp, MII_LPA, rmtadv);
3179 } else {
3180 /* Reprogram the advertisement register, even if it
3181 * does not affect the current link. If the link
3182 * gets renegotiated in the future, we can save an
3183 * additional renegotiation cycle by advertising
3184 * it correctly in the first place.
3185 */
3186 if (curadv != reqadv) {
3187 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3188 ADVERTISE_PAUSE_ASYM);
3189 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3193 return 1;
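/* The main copper link bring-up path: clear stale MAC events,
 * apply per-chip PHY workarounds, poll BMSR for link, decode
 * speed/duplex from the aux status register, then reconcile the
 * MAC mode, flow control, and carrier state with the result.
 */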
3196 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3198 int current_link_up;
3199 u32 bmsr, val;
3200 u32 lcl_adv, rmt_adv;
3201 u16 current_speed;
3202 u8 current_duplex;
3203 int i, err;
3205 tw32(MAC_EVENT, 0);
3207 tw32_f(MAC_STATUS,
3208 (MAC_STATUS_SYNC_CHANGED |
3209 MAC_STATUS_CFG_CHANGED |
3210 MAC_STATUS_MI_COMPLETION |
3211 MAC_STATUS_LNKSTATE_CHANGED));
3212 udelay(40);
3214 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3215 tw32_f(MAC_MI_MODE,
3216 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3217 udelay(80);
3220 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3222 /* Some third-party PHYs need to be reset on link going
3223 * down.
3224 */
3225 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3226 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3227 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3228 netif_carrier_ok(tp->dev)) {
3229 tg3_readphy(tp, MII_BMSR, &bmsr);
3230 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3231 !(bmsr & BMSR_LSTATUS))
3232 force_reset = 1;
3234 if (force_reset)
3235 tg3_phy_reset(tp);
3237 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3238 tg3_readphy(tp, MII_BMSR, &bmsr);
3239 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3240 !tg3_flag(tp, INIT_COMPLETE))
3241 bmsr = 0;
3243 if (!(bmsr & BMSR_LSTATUS)) {
3244 err = tg3_init_5401phy_dsp(tp);
3245 if (err)
3246 return err;
3248 tg3_readphy(tp, MII_BMSR, &bmsr);
3249 for (i = 0; i < 1000; i++) {
3250 udelay(10);
3251 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3252 (bmsr & BMSR_LSTATUS)) {
3253 udelay(40);
3254 break;
3258 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3259 TG3_PHY_REV_BCM5401_B0 &&
3260 !(bmsr & BMSR_LSTATUS) &&
3261 tp->link_config.active_speed == SPEED_1000) {
3262 err = tg3_phy_reset(tp);
3263 if (!err)
3264 err = tg3_init_5401phy_dsp(tp);
3265 if (err)
3266 return err;
3269 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3270 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3271 /* 5701 {A0,B0} CRC bug workaround */
3272 tg3_writephy(tp, 0x15, 0x0a75);
3273 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3274 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3275 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3278 /* Clear pending interrupts... */
3279 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3280 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3282 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3283 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3284 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3285 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3287 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3288 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3289 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3290 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3291 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3292 else
3293 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3296 current_link_up = 0;
3297 current_speed = SPEED_INVALID;
3298 current_duplex = DUPLEX_INVALID;
3300 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3301 err = tg3_phy_auxctl_read(tp,
3302 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3303 &val);
3304 if (!err && !(val & (1 << 10))) {
3305 tg3_phy_auxctl_write(tp,
3306 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3307 val | (1 << 10));
3308 goto relink;
3312 bmsr = 0;
3313 for (i = 0; i < 100; i++) {
3314 tg3_readphy(tp, MII_BMSR, &bmsr);
3315 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3316 (bmsr & BMSR_LSTATUS))
3317 break;
3318 udelay(40);
3321 if (bmsr & BMSR_LSTATUS) {
3322 u32 aux_stat, bmcr;
3324 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3325 for (i = 0; i < 2000; i++) {
3326 udelay(10);
3327 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3328 aux_stat)
3329 break;
3332 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3333 &current_speed,
3334 &current_duplex);
3336 bmcr = 0;
3337 for (i = 0; i < 200; i++) {
3338 tg3_readphy(tp, MII_BMCR, &bmcr);
3339 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3340 continue;
3341 if (bmcr && bmcr != 0x7fff)
3342 break;
3343 udelay(10);
3346 lcl_adv = 0;
3347 rmt_adv = 0;
3349 tp->link_config.active_speed = current_speed;
3350 tp->link_config.active_duplex = current_duplex;
3352 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3353 if ((bmcr & BMCR_ANENABLE) &&
3354 tg3_copper_is_advertising_all(tp,
3355 tp->link_config.advertising)) {
3356 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3357 &rmt_adv))
3358 current_link_up = 1;
3360 } else {
3361 if (!(bmcr & BMCR_ANENABLE) &&
3362 tp->link_config.speed == current_speed &&
3363 tp->link_config.duplex == current_duplex &&
3364 tp->link_config.flowctrl ==
3365 tp->link_config.active_flowctrl) {
3366 current_link_up = 1;
3370 if (current_link_up == 1 &&
3371 tp->link_config.active_duplex == DUPLEX_FULL)
3372 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3375 relink:
3376 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3377 tg3_phy_copper_begin(tp);
3379 tg3_readphy(tp, MII_BMSR, &bmsr);
3380 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3381 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3382 current_link_up = 1;
3385 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3386 if (current_link_up == 1) {
3387 if (tp->link_config.active_speed == SPEED_100 ||
3388 tp->link_config.active_speed == SPEED_10)
3389 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3390 else
3391 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3392 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3393 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3394 else
3395 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3397 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3398 if (tp->link_config.active_duplex == DUPLEX_HALF)
3399 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3401 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3402 if (current_link_up == 1 &&
3403 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3404 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3405 else
3406 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3409 /* ??? Without this setting Netgear GA302T PHY does not
3410 * ??? send/receive packets...
3411 */
3412 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3413 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3414 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3415 tw32_f(MAC_MI_MODE, tp->mi_mode);
3416 udelay(80);
3419 tw32_f(MAC_MODE, tp->mac_mode);
3420 udelay(40);
3422 tg3_phy_eee_adjust(tp, current_link_up);
3424 if (tg3_flag(tp, USE_LINKCHG_REG)) {
3425 /* Polled via timer. */
3426 tw32_f(MAC_EVENT, 0);
3427 } else {
3428 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3430 udelay(40);
3432 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3433 current_link_up == 1 &&
3434 tp->link_config.active_speed == SPEED_1000 &&
3435 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3436 udelay(120);
3437 tw32_f(MAC_STATUS,
3438 (MAC_STATUS_SYNC_CHANGED |
3439 MAC_STATUS_CFG_CHANGED));
3440 udelay(40);
3441 tg3_write_mem(tp,
3442 NIC_SRAM_FIRMWARE_MBOX,
3443 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3446 /* Prevent send BD corruption. */
3447 if (tg3_flag(tp, CLKREQ_BUG)) {
3448 u16 oldlnkctl, newlnkctl;
3450 pci_read_config_word(tp->pdev,
3451 tp->pcie_cap + PCI_EXP_LNKCTL,
3452 &oldlnkctl);
3453 if (tp->link_config.active_speed == SPEED_100 ||
3454 tp->link_config.active_speed == SPEED_10)
3455 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3456 else
3457 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3458 if (newlnkctl != oldlnkctl)
3459 pci_write_config_word(tp->pdev,
3460 tp->pcie_cap + PCI_EXP_LNKCTL,
3461 newlnkctl);
3464 if (current_link_up != netif_carrier_ok(tp->dev)) {
3465 if (current_link_up)
3466 netif_carrier_on(tp->dev);
3467 else
3468 netif_carrier_off(tp->dev);
3469 tg3_link_report(tp);
3472 return 0;
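/* State block for the software 802.3z-style autonegotiation
 * arbitration used when the hardware autoneg engine is not
 * available; tg3_fiber_aneg_smachine() steps it once per tick.
 */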
3475 struct tg3_fiber_aneginfo {
3476 int state;
3477 #define ANEG_STATE_UNKNOWN 0
3478 #define ANEG_STATE_AN_ENABLE 1
3479 #define ANEG_STATE_RESTART_INIT 2
3480 #define ANEG_STATE_RESTART 3
3481 #define ANEG_STATE_DISABLE_LINK_OK 4
3482 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3483 #define ANEG_STATE_ABILITY_DETECT 6
3484 #define ANEG_STATE_ACK_DETECT_INIT 7
3485 #define ANEG_STATE_ACK_DETECT 8
3486 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3487 #define ANEG_STATE_COMPLETE_ACK 10
3488 #define ANEG_STATE_IDLE_DETECT_INIT 11
3489 #define ANEG_STATE_IDLE_DETECT 12
3490 #define ANEG_STATE_LINK_OK 13
3491 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3492 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3494 u32 flags;
3495 #define MR_AN_ENABLE 0x00000001
3496 #define MR_RESTART_AN 0x00000002
3497 #define MR_AN_COMPLETE 0x00000004
3498 #define MR_PAGE_RX 0x00000008
3499 #define MR_NP_LOADED 0x00000010
3500 #define MR_TOGGLE_TX 0x00000020
3501 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3502 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3503 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3504 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3505 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3506 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3507 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3508 #define MR_TOGGLE_RX 0x00002000
3509 #define MR_NP_RX 0x00004000
3511 #define MR_LINK_OK 0x80000000
3513 unsigned long link_time, cur_time;
3515 u32 ability_match_cfg;
3516 int ability_match_count;
3518 char ability_match, idle_match, ack_match;
3520 u32 txconfig, rxconfig;
3521 #define ANEG_CFG_NP 0x00000080
3522 #define ANEG_CFG_ACK 0x00000040
3523 #define ANEG_CFG_RF2 0x00000020
3524 #define ANEG_CFG_RF1 0x00000010
3525 #define ANEG_CFG_PS2 0x00000001
3526 #define ANEG_CFG_PS1 0x00008000
3527 #define ANEG_CFG_HD 0x00004000
3528 #define ANEG_CFG_FD 0x00002000
3529 #define ANEG_CFG_INVAL 0x00001f06
3532 #define ANEG_OK 0
3533 #define ANEG_DONE 1
3534 #define ANEG_TIMER_ENAB 2
3535 #define ANEG_FAILED -1
3537 #define ANEG_STATE_SETTLE_TIME 10000
3539 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3540 struct tg3_fiber_aneginfo *ap)
3542 u16 flowctrl;
3543 unsigned long delta;
3544 u32 rx_cfg_reg;
3545 int ret;
3547 if (ap->state == ANEG_STATE_UNKNOWN) {
3548 ap->rxconfig = 0;
3549 ap->link_time = 0;
3550 ap->cur_time = 0;
3551 ap->ability_match_cfg = 0;
3552 ap->ability_match_count = 0;
3553 ap->ability_match = 0;
3554 ap->idle_match = 0;
3555 ap->ack_match = 0;
3557 ap->cur_time++;
3559 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3560 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3562 if (rx_cfg_reg != ap->ability_match_cfg) {
3563 ap->ability_match_cfg = rx_cfg_reg;
3564 ap->ability_match = 0;
3565 ap->ability_match_count = 0;
3566 } else {
3567 if (++ap->ability_match_count > 1) {
3568 ap->ability_match = 1;
3569 ap->ability_match_cfg = rx_cfg_reg;
3572 if (rx_cfg_reg & ANEG_CFG_ACK)
3573 ap->ack_match = 1;
3574 else
3575 ap->ack_match = 0;
3577 ap->idle_match = 0;
3578 } else {
3579 ap->idle_match = 1;
3580 ap->ability_match_cfg = 0;
3581 ap->ability_match_count = 0;
3582 ap->ability_match = 0;
3583 ap->ack_match = 0;
3585 rx_cfg_reg = 0;
3588 ap->rxconfig = rx_cfg_reg;
3589 ret = ANEG_OK;
3591 switch (ap->state) {
3592 case ANEG_STATE_UNKNOWN:
3593 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3594 ap->state = ANEG_STATE_AN_ENABLE;
3596 /* fallthru */
3597 case ANEG_STATE_AN_ENABLE:
3598 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3599 if (ap->flags & MR_AN_ENABLE) {
3600 ap->link_time = 0;
3601 ap->cur_time = 0;
3602 ap->ability_match_cfg = 0;
3603 ap->ability_match_count = 0;
3604 ap->ability_match = 0;
3605 ap->idle_match = 0;
3606 ap->ack_match = 0;
3608 ap->state = ANEG_STATE_RESTART_INIT;
3609 } else {
3610 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3612 break;
3614 case ANEG_STATE_RESTART_INIT:
3615 ap->link_time = ap->cur_time;
3616 ap->flags &= ~(MR_NP_LOADED);
3617 ap->txconfig = 0;
3618 tw32(MAC_TX_AUTO_NEG, 0);
3619 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3620 tw32_f(MAC_MODE, tp->mac_mode);
3621 udelay(40);
3623 ret = ANEG_TIMER_ENAB;
3624 ap->state = ANEG_STATE_RESTART;
3626 /* fallthru */
3627 case ANEG_STATE_RESTART:
3628 delta = ap->cur_time - ap->link_time;
3629 if (delta > ANEG_STATE_SETTLE_TIME)
3630 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3631 else
3632 ret = ANEG_TIMER_ENAB;
3633 break;
3635 case ANEG_STATE_DISABLE_LINK_OK:
3636 ret = ANEG_DONE;
3637 break;
3639 case ANEG_STATE_ABILITY_DETECT_INIT:
3640 ap->flags &= ~(MR_TOGGLE_TX);
3641 ap->txconfig = ANEG_CFG_FD;
3642 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3643 if (flowctrl & ADVERTISE_1000XPAUSE)
3644 ap->txconfig |= ANEG_CFG_PS1;
3645 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3646 ap->txconfig |= ANEG_CFG_PS2;
3647 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3648 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3649 tw32_f(MAC_MODE, tp->mac_mode);
3650 udelay(40);
3652 ap->state = ANEG_STATE_ABILITY_DETECT;
3653 break;
3655 case ANEG_STATE_ABILITY_DETECT:
3656 if (ap->ability_match != 0 && ap->rxconfig != 0)
3657 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3658 break;
3660 case ANEG_STATE_ACK_DETECT_INIT:
3661 ap->txconfig |= ANEG_CFG_ACK;
3662 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3663 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3664 tw32_f(MAC_MODE, tp->mac_mode);
3665 udelay(40);
3667 ap->state = ANEG_STATE_ACK_DETECT;
3669 /* fallthru */
3670 case ANEG_STATE_ACK_DETECT:
3671 if (ap->ack_match != 0) {
3672 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3673 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3674 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3675 } else {
3676 ap->state = ANEG_STATE_AN_ENABLE;
3678 } else if (ap->ability_match != 0 &&
3679 ap->rxconfig == 0) {
3680 ap->state = ANEG_STATE_AN_ENABLE;
3682 break;
3684 case ANEG_STATE_COMPLETE_ACK_INIT:
3685 if (ap->rxconfig & ANEG_CFG_INVAL) {
3686 ret = ANEG_FAILED;
3687 break;
3689 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3690 MR_LP_ADV_HALF_DUPLEX |
3691 MR_LP_ADV_SYM_PAUSE |
3692 MR_LP_ADV_ASYM_PAUSE |
3693 MR_LP_ADV_REMOTE_FAULT1 |
3694 MR_LP_ADV_REMOTE_FAULT2 |
3695 MR_LP_ADV_NEXT_PAGE |
3696 MR_TOGGLE_RX |
3697 MR_NP_RX);
3698 if (ap->rxconfig & ANEG_CFG_FD)
3699 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3700 if (ap->rxconfig & ANEG_CFG_HD)
3701 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3702 if (ap->rxconfig & ANEG_CFG_PS1)
3703 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3704 if (ap->rxconfig & ANEG_CFG_PS2)
3705 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3706 if (ap->rxconfig & ANEG_CFG_RF1)
3707 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3708 if (ap->rxconfig & ANEG_CFG_RF2)
3709 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3710 if (ap->rxconfig & ANEG_CFG_NP)
3711 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3713 ap->link_time = ap->cur_time;
3715 ap->flags ^= (MR_TOGGLE_TX);
3716 if (ap->rxconfig & 0x0008)
3717 ap->flags |= MR_TOGGLE_RX;
3718 if (ap->rxconfig & ANEG_CFG_NP)
3719 ap->flags |= MR_NP_RX;
3720 ap->flags |= MR_PAGE_RX;
3722 ap->state = ANEG_STATE_COMPLETE_ACK;
3723 ret = ANEG_TIMER_ENAB;
3724 break;
3726 case ANEG_STATE_COMPLETE_ACK:
3727 if (ap->ability_match != 0 &&
3728 ap->rxconfig == 0) {
3729 ap->state = ANEG_STATE_AN_ENABLE;
3730 break;
3732 delta = ap->cur_time - ap->link_time;
3733 if (delta > ANEG_STATE_SETTLE_TIME) {
3734 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3735 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3736 } else {
3737 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3738 !(ap->flags & MR_NP_RX)) {
3739 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3740 } else {
3741 ret = ANEG_FAILED;
3745 break;
3747 case ANEG_STATE_IDLE_DETECT_INIT:
3748 ap->link_time = ap->cur_time;
3749 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3750 tw32_f(MAC_MODE, tp->mac_mode);
3751 udelay(40);
3753 ap->state = ANEG_STATE_IDLE_DETECT;
3754 ret = ANEG_TIMER_ENAB;
3755 break;
3757 case ANEG_STATE_IDLE_DETECT:
3758 if (ap->ability_match != 0 &&
3759 ap->rxconfig == 0) {
3760 ap->state = ANEG_STATE_AN_ENABLE;
3761 break;
3763 delta = ap->cur_time - ap->link_time;
3764 if (delta > ANEG_STATE_SETTLE_TIME) {
3765 /* XXX another gem from the Broadcom driver :( */
3766 ap->state = ANEG_STATE_LINK_OK;
3768 break;
3770 case ANEG_STATE_LINK_OK:
3771 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3772 ret = ANEG_DONE;
3773 break;
3775 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3776 /* ??? unimplemented */
3777 break;
3779 case ANEG_STATE_NEXT_PAGE_WAIT:
3780 /* ??? unimplemented */
3781 break;
3783 default:
3784 ret = ANEG_FAILED;
3785 break;
3788 return ret;
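/* Run the software autoneg state machine to completion, ticking
 * it up to 195000 times at roughly 1us intervals, and report
 * whether a full-duplex link partner acknowledged.
 */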
3791 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3793 int res = 0;
3794 struct tg3_fiber_aneginfo aninfo;
3795 int status = ANEG_FAILED;
3796 unsigned int tick;
3797 u32 tmp;
3799 tw32_f(MAC_TX_AUTO_NEG, 0);
3801 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3802 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3803 udelay(40);
3805 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3806 udelay(40);
3808 memset(&aninfo, 0, sizeof(aninfo));
3809 aninfo.flags |= MR_AN_ENABLE;
3810 aninfo.state = ANEG_STATE_UNKNOWN;
3811 aninfo.cur_time = 0;
3812 tick = 0;
3813 while (++tick < 195000) {
3814 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3815 if (status == ANEG_DONE || status == ANEG_FAILED)
3816 break;
3818 udelay(1);
3821 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3822 tw32_f(MAC_MODE, tp->mac_mode);
3823 udelay(40);
3825 *txflags = aninfo.txconfig;
3826 *rxflags = aninfo.flags;
3828 if (status == ANEG_DONE &&
3829 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3830 MR_LP_ADV_FULL_DUPLEX)))
3831 res = 1;
3833 return res;
3836 static void tg3_init_bcm8002(struct tg3 *tp)
3838 u32 mac_status = tr32(MAC_STATUS);
3839 int i;
3841 /* Reset when initializing for the first time or when we have a link. */
3842 if (tg3_flag(tp, INIT_COMPLETE) &&
3843 !(mac_status & MAC_STATUS_PCS_SYNCED))
3844 return;
3846 /* Set PLL lock range. */
3847 tg3_writephy(tp, 0x16, 0x8007);
3849 /* SW reset */
3850 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3852 /* Wait for reset to complete. */
3853 /* XXX schedule_timeout() ... */
3854 for (i = 0; i < 500; i++)
3855 udelay(10);
3857 /* Config mode; select PMA/Ch 1 regs. */
3858 tg3_writephy(tp, 0x10, 0x8411);
3860 /* Enable auto-lock and comdet, select txclk for tx. */
3861 tg3_writephy(tp, 0x11, 0x0a10);
3863 tg3_writephy(tp, 0x18, 0x00a0);
3864 tg3_writephy(tp, 0x16, 0x41ff);
3866 /* Assert and deassert POR. */
3867 tg3_writephy(tp, 0x13, 0x0400);
3868 udelay(40);
3869 tg3_writephy(tp, 0x13, 0x0000);
3871 tg3_writephy(tp, 0x11, 0x0a50);
3872 udelay(40);
3873 tg3_writephy(tp, 0x11, 0x0a10);
3875 /* Wait for signal to stabilize */
3876 /* XXX schedule_timeout() ... */
3877 for (i = 0; i < 15000; i++)
3878 udelay(10);
3880 /* Deselect the channel register so we can read the PHYID
3881 * later.
3882 */
3883 tg3_writephy(tp, 0x10, 0x8011);
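/* Fiber link setup via the SG_DIG hardware autoneg engine:
 * program the expected SG_DIG_CTRL value, fall back to parallel
 * detection when the partner never sends config words, and
 * derive flow control from the resolved pause bits.
 */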
3886 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3888 u16 flowctrl;
3889 u32 sg_dig_ctrl, sg_dig_status;
3890 u32 serdes_cfg, expected_sg_dig_ctrl;
3891 int workaround, port_a;
3892 int current_link_up;
3894 serdes_cfg = 0;
3895 expected_sg_dig_ctrl = 0;
3896 workaround = 0;
3897 port_a = 1;
3898 current_link_up = 0;
3900 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3901 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3902 workaround = 1;
3903 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3904 port_a = 0;
3906 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3907 /* preserve bits 20-23 for voltage regulator */
3908 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3911 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3913 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3914 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3915 if (workaround) {
3916 u32 val = serdes_cfg;
3918 if (port_a)
3919 val |= 0xc010000;
3920 else
3921 val |= 0x4010000;
3922 tw32_f(MAC_SERDES_CFG, val);
3925 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3927 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3928 tg3_setup_flow_control(tp, 0, 0);
3929 current_link_up = 1;
3931 goto out;
3934 /* Want auto-negotiation. */
3935 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3937 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3938 if (flowctrl & ADVERTISE_1000XPAUSE)
3939 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3940 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3941 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3943 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3944 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3945 tp->serdes_counter &&
3946 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3947 MAC_STATUS_RCVD_CFG)) ==
3948 MAC_STATUS_PCS_SYNCED)) {
3949 tp->serdes_counter--;
3950 current_link_up = 1;
3951 goto out;
3953 restart_autoneg:
3954 if (workaround)
3955 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3956 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3957 udelay(5);
3958 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3960 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3961 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3962 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3963 MAC_STATUS_SIGNAL_DET)) {
3964 sg_dig_status = tr32(SG_DIG_STATUS);
3965 mac_status = tr32(MAC_STATUS);
3967 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3968 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3969 u32 local_adv = 0, remote_adv = 0;
3971 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3972 local_adv |= ADVERTISE_1000XPAUSE;
3973 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3974 local_adv |= ADVERTISE_1000XPSE_ASYM;
3976 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3977 remote_adv |= LPA_1000XPAUSE;
3978 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3979 remote_adv |= LPA_1000XPAUSE_ASYM;
3981 tg3_setup_flow_control(tp, local_adv, remote_adv);
3982 current_link_up = 1;
3983 tp->serdes_counter = 0;
3984 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3985 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3986 if (tp->serdes_counter)
3987 tp->serdes_counter--;
3988 else {
3989 if (workaround) {
3990 u32 val = serdes_cfg;
3992 if (port_a)
3993 val |= 0xc010000;
3994 else
3995 val |= 0x4010000;
3997 tw32_f(MAC_SERDES_CFG, val);
4000 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4001 udelay(40);
4003 /* Link parallel detection - link is up only if we
4004 * have PCS_SYNC and are not receiving config code
4005 * words. */
4006 mac_status = tr32(MAC_STATUS);
4007 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4008 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4009 tg3_setup_flow_control(tp, 0, 0);
4010 current_link_up = 1;
4011 tp->phy_flags |=
4012 TG3_PHYFLG_PARALLEL_DETECT;
4013 tp->serdes_counter =
4014 SERDES_PARALLEL_DET_TIMEOUT;
4015 } else
4016 goto restart_autoneg;
4019 } else {
4020 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4021 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4024 out:
4025 return current_link_up;
4028 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4030 int current_link_up = 0;
4032 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4033 goto out;
4035 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4036 u32 txflags, rxflags;
4037 int i;
4039 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4040 u32 local_adv = 0, remote_adv = 0;
4042 if (txflags & ANEG_CFG_PS1)
4043 local_adv |= ADVERTISE_1000XPAUSE;
4044 if (txflags & ANEG_CFG_PS2)
4045 local_adv |= ADVERTISE_1000XPSE_ASYM;
4047 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4048 remote_adv |= LPA_1000XPAUSE;
4049 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4050 remote_adv |= LPA_1000XPAUSE_ASYM;
4052 tg3_setup_flow_control(tp, local_adv, remote_adv);
4054 current_link_up = 1;
4056 for (i = 0; i < 30; i++) {
4057 udelay(20);
4058 tw32_f(MAC_STATUS,
4059 (MAC_STATUS_SYNC_CHANGED |
4060 MAC_STATUS_CFG_CHANGED));
4061 udelay(40);
4062 if ((tr32(MAC_STATUS) &
4063 (MAC_STATUS_SYNC_CHANGED |
4064 MAC_STATUS_CFG_CHANGED)) == 0)
4065 break;
4068 mac_status = tr32(MAC_STATUS);
4069 if (current_link_up == 0 &&
4070 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4071 !(mac_status & MAC_STATUS_RCVD_CFG))
4072 current_link_up = 1;
4073 } else {
4074 tg3_setup_flow_control(tp, 0, 0);
4076 /* Forcing 1000FD link up. */
4077 current_link_up = 1;
4079 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4080 udelay(40);
4082 tw32_f(MAC_MODE, tp->mac_mode);
4083 udelay(40);
4086 out:
4087 return current_link_up;
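/* Top-level TBI/fiber link management: bail out early when the
 * link is already up and clean, otherwise run the hardware or
 * software autoneg path and bring the LEDs and carrier state in
 * line with the outcome.
 */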
4090 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4092 u32 orig_pause_cfg;
4093 u16 orig_active_speed;
4094 u8 orig_active_duplex;
4095 u32 mac_status;
4096 int current_link_up;
4097 int i;
4099 orig_pause_cfg = tp->link_config.active_flowctrl;
4100 orig_active_speed = tp->link_config.active_speed;
4101 orig_active_duplex = tp->link_config.active_duplex;
4103 if (!tg3_flag(tp, HW_AUTONEG) &&
4104 netif_carrier_ok(tp->dev) &&
4105 tg3_flag(tp, INIT_COMPLETE)) {
4106 mac_status = tr32(MAC_STATUS);
4107 mac_status &= (MAC_STATUS_PCS_SYNCED |
4108 MAC_STATUS_SIGNAL_DET |
4109 MAC_STATUS_CFG_CHANGED |
4110 MAC_STATUS_RCVD_CFG);
4111 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4112 MAC_STATUS_SIGNAL_DET)) {
4113 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4114 MAC_STATUS_CFG_CHANGED));
4115 return 0;
4119 tw32_f(MAC_TX_AUTO_NEG, 0);
4121 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4122 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4123 tw32_f(MAC_MODE, tp->mac_mode);
4124 udelay(40);
4126 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4127 tg3_init_bcm8002(tp);
4129 /* Enable link change event even when serdes polling. */
4130 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4131 udelay(40);
4133 current_link_up = 0;
4134 mac_status = tr32(MAC_STATUS);
4136 if (tg3_flag(tp, HW_AUTONEG))
4137 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4138 else
4139 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4141 tp->napi[0].hw_status->status =
4142 (SD_STATUS_UPDATED |
4143 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4145 for (i = 0; i < 100; i++) {
4146 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4147 MAC_STATUS_CFG_CHANGED));
4148 udelay(5);
4149 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4150 MAC_STATUS_CFG_CHANGED |
4151 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4152 break;
4155 mac_status = tr32(MAC_STATUS);
4156 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4157 current_link_up = 0;
4158 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4159 tp->serdes_counter == 0) {
4160 tw32_f(MAC_MODE, (tp->mac_mode |
4161 MAC_MODE_SEND_CONFIGS));
4162 udelay(1);
4163 tw32_f(MAC_MODE, tp->mac_mode);
4167 if (current_link_up == 1) {
4168 tp->link_config.active_speed = SPEED_1000;
4169 tp->link_config.active_duplex = DUPLEX_FULL;
4170 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4171 LED_CTRL_LNKLED_OVERRIDE |
4172 LED_CTRL_1000MBPS_ON));
4173 } else {
4174 tp->link_config.active_speed = SPEED_INVALID;
4175 tp->link_config.active_duplex = DUPLEX_INVALID;
4176 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4177 LED_CTRL_LNKLED_OVERRIDE |
4178 LED_CTRL_TRAFFIC_OVERRIDE));
4181 if (current_link_up != netif_carrier_ok(tp->dev)) {
4182 if (current_link_up)
4183 netif_carrier_on(tp->dev);
4184 else
4185 netif_carrier_off(tp->dev);
4186 tg3_link_report(tp);
4187 } else {
4188 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4189 if (orig_pause_cfg != now_pause_cfg ||
4190 orig_active_speed != tp->link_config.active_speed ||
4191 orig_active_duplex != tp->link_config.active_duplex)
4192 tg3_link_report(tp);
4195 return 0;
4198 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4200 int current_link_up, err = 0;
4201 u32 bmsr, bmcr;
4202 u16 current_speed;
4203 u8 current_duplex;
4204 u32 local_adv, remote_adv;
4206 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4207 tw32_f(MAC_MODE, tp->mac_mode);
4208 udelay(40);
4210 tw32(MAC_EVENT, 0);
4212 tw32_f(MAC_STATUS,
4213 (MAC_STATUS_SYNC_CHANGED |
4214 MAC_STATUS_CFG_CHANGED |
4215 MAC_STATUS_MI_COMPLETION |
4216 MAC_STATUS_LNKSTATE_CHANGED));
4217 udelay(40);
4219 if (force_reset)
4220 tg3_phy_reset(tp);
4222 current_link_up = 0;
4223 current_speed = SPEED_INVALID;
4224 current_duplex = DUPLEX_INVALID;
4226 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4227 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4228 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4229 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4230 bmsr |= BMSR_LSTATUS;
4231 else
4232 bmsr &= ~BMSR_LSTATUS;
4235 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4237 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4238 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4239 /* do nothing, just check for link up at the end */
4240 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4241 u32 adv, new_adv;
4243 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4244 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4245 ADVERTISE_1000XPAUSE |
4246 ADVERTISE_1000XPSE_ASYM |
4247 ADVERTISE_SLCT);
4249 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4251 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4252 new_adv |= ADVERTISE_1000XHALF;
4253 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4254 new_adv |= ADVERTISE_1000XFULL;
4256 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4257 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4258 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4259 tg3_writephy(tp, MII_BMCR, bmcr);
4261 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4262 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4263 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4265 return err;
4267 } else {
4268 u32 new_bmcr;
4270 bmcr &= ~BMCR_SPEED1000;
4271 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4273 if (tp->link_config.duplex == DUPLEX_FULL)
4274 new_bmcr |= BMCR_FULLDPLX;
4276 if (new_bmcr != bmcr) {
4277 /* BMCR_SPEED1000 is a reserved bit that needs
4278 * to be set on write.
4280 new_bmcr |= BMCR_SPEED1000;
4282 /* Force a linkdown */
4283 if (netif_carrier_ok(tp->dev)) {
4284 u32 adv;
4286 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4287 adv &= ~(ADVERTISE_1000XFULL |
4288 ADVERTISE_1000XHALF |
4289 ADVERTISE_SLCT);
4290 tg3_writephy(tp, MII_ADVERTISE, adv);
4291 tg3_writephy(tp, MII_BMCR, bmcr |
4292 BMCR_ANRESTART |
4293 BMCR_ANENABLE);
4294 udelay(10);
4295 netif_carrier_off(tp->dev);
4297 tg3_writephy(tp, MII_BMCR, new_bmcr);
4298 bmcr = new_bmcr;
4299 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4300 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4301 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4302 ASIC_REV_5714) {
4303 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4304 bmsr |= BMSR_LSTATUS;
4305 else
4306 bmsr &= ~BMSR_LSTATUS;
4308 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4312 if (bmsr & BMSR_LSTATUS) {
4313 current_speed = SPEED_1000;
4314 current_link_up = 1;
4315 if (bmcr & BMCR_FULLDPLX)
4316 current_duplex = DUPLEX_FULL;
4317 else
4318 current_duplex = DUPLEX_HALF;
4320 local_adv = 0;
4321 remote_adv = 0;
4323 if (bmcr & BMCR_ANENABLE) {
4324 u32 common;
4326 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4327 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4328 common = local_adv & remote_adv;
4329 if (common & (ADVERTISE_1000XHALF |
4330 ADVERTISE_1000XFULL)) {
4331 if (common & ADVERTISE_1000XFULL)
4332 current_duplex = DUPLEX_FULL;
4333 else
4334 current_duplex = DUPLEX_HALF;
4335 } else if (!tg3_flag(tp, 5780_CLASS)) {
4336 /* Link is up via parallel detect */
4337 } else {
4338 current_link_up = 0;
4343 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4344 tg3_setup_flow_control(tp, local_adv, remote_adv);
4346 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4347 if (tp->link_config.active_duplex == DUPLEX_HALF)
4348 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4350 tw32_f(MAC_MODE, tp->mac_mode);
4351 udelay(40);
4353 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4355 tp->link_config.active_speed = current_speed;
4356 tp->link_config.active_duplex = current_duplex;
4358 if (current_link_up != netif_carrier_ok(tp->dev)) {
4359 if (current_link_up)
4360 netif_carrier_on(tp->dev);
4361 else {
4362 netif_carrier_off(tp->dev);
4363 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4365 tg3_link_report(tp);
4367 return err;
4370 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4372 if (tp->serdes_counter) {
4373 /* Give autoneg time to complete. */
4374 tp->serdes_counter--;
4375 return;
4378 if (!netif_carrier_ok(tp->dev) &&
4379 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4380 u32 bmcr;
4382 tg3_readphy(tp, MII_BMCR, &bmcr);
4383 if (bmcr & BMCR_ANENABLE) {
4384 u32 phy1, phy2;
4386 /* Select shadow register 0x1f */
4387 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4388 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4390 /* Select expansion interrupt status register */
4391 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4392 MII_TG3_DSP_EXP1_INT_STAT);
4393 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4394 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4396 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4397 /* We have signal detect and are not receiving
4398 * config code words; the link is up by parallel
4399 * detection.
4402 bmcr &= ~BMCR_ANENABLE;
4403 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4404 tg3_writephy(tp, MII_BMCR, bmcr);
4405 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4408 } else if (netif_carrier_ok(tp->dev) &&
4409 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4410 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4411 u32 phy2;
4413 /* Select expansion interrupt status register */
4414 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4415 MII_TG3_DSP_EXP1_INT_STAT);
4416 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4417 if (phy2 & 0x20) {
4418 u32 bmcr;
4420 /* Config code words received, turn on autoneg. */
4421 tg3_readphy(tp, MII_BMCR, &bmcr);
4422 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4424 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4430 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4432 u32 val;
4433 int err;
4435 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4436 err = tg3_setup_fiber_phy(tp, force_reset);
4437 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4438 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4439 else
4440 err = tg3_setup_copper_phy(tp, force_reset);
4442 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4443 u32 scale;
4445 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4446 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4447 scale = 65;
4448 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4449 scale = 6;
4450 else
4451 scale = 12;
4453 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4454 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4455 tw32(GRC_MISC_CFG, val);
4458 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4459 (6 << TX_LENGTHS_IPG_SHIFT);
4460 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4461 val |= tr32(MAC_TX_LENGTHS) &
4462 (TX_LENGTHS_JMB_FRM_LEN_MSK |
4463 TX_LENGTHS_CNT_DWN_VAL_MSK);
4465 if (tp->link_config.active_speed == SPEED_1000 &&
4466 tp->link_config.active_duplex == DUPLEX_HALF)
4467 tw32(MAC_TX_LENGTHS, val |
4468 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4469 else
4470 tw32(MAC_TX_LENGTHS, val |
4471 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4473 if (!tg3_flag(tp, 5705_PLUS)) {
4474 if (netif_carrier_ok(tp->dev)) {
4475 tw32(HOSTCC_STAT_COAL_TICKS,
4476 tp->coal.stats_block_coalesce_usecs);
4477 } else {
4478 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4482 if (tg3_flag(tp, ASPM_WORKAROUND)) {
4483 val = tr32(PCIE_PWR_MGMT_THRESH);
4484 if (!netif_carrier_ok(tp->dev))
4485 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4486 tp->pwrmgmt_thresh;
4487 else
4488 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4489 tw32(PCIE_PWR_MGMT_THRESH, val);
4492 return err;
4495 static inline int tg3_irq_sync(struct tg3 *tp)
4497 return tp->irq_sync;
4500 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4502 int i;
4504 dst = (u32 *)((u8 *)dst + off);
4505 for (i = 0; i < len; i += sizeof(u32))
4506 *dst++ = tr32(off + i);
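/* Editor's note: dst is advanced by 'off' above so that each register
 * value lands at its own offset within the dump buffer, keeping the
 * buffer a flat image of the register space.
 */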
4509 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4511 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4512 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4513 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4514 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4515 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4516 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4517 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4518 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4519 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4520 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4521 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4522 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4523 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4524 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4525 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4526 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4527 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4528 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4529 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4531 if (tg3_flag(tp, SUPPORT_MSIX))
4532 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4534 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4535 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4536 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4537 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4538 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4539 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4540 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4541 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4543 if (!tg3_flag(tp, 5705_PLUS)) {
4544 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4545 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4546 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4549 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4550 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4551 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4552 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4553 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4555 if (tg3_flag(tp, NVRAM))
4556 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4559 static void tg3_dump_state(struct tg3 *tp)
4561 int i;
4562 u32 *regs;
4564 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4565 if (!regs) {
4566 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4567 return;
4570 if (tg3_flag(tp, PCI_EXPRESS)) {
4571 /* Read up to but not including private PCI registers */
4572 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4573 regs[i / sizeof(u32)] = tr32(i);
4574 } else
4575 tg3_dump_legacy_regs(tp, regs);
4577 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4578 if (!regs[i + 0] && !regs[i + 1] &&
4579 !regs[i + 2] && !regs[i + 3])
4580 continue;
4582 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4583 i * 4,
4584 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4587 kfree(regs);
4589 for (i = 0; i < tp->irq_cnt; i++) {
4590 struct tg3_napi *tnapi = &tp->napi[i];
4592 /* SW status block */
4593 netdev_err(tp->dev,
4594 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4596 tnapi->hw_status->status,
4597 tnapi->hw_status->status_tag,
4598 tnapi->hw_status->rx_jumbo_consumer,
4599 tnapi->hw_status->rx_consumer,
4600 tnapi->hw_status->rx_mini_consumer,
4601 tnapi->hw_status->idx[0].rx_producer,
4602 tnapi->hw_status->idx[0].tx_consumer);
4604 netdev_err(tp->dev,
4605 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4607 tnapi->last_tag, tnapi->last_irq_tag,
4608 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4609 tnapi->rx_rcb_ptr,
4610 tnapi->prodring.rx_std_prod_idx,
4611 tnapi->prodring.rx_std_cons_idx,
4612 tnapi->prodring.rx_jmb_prod_idx,
4613 tnapi->prodring.rx_jmb_cons_idx);
4617 /* This is called whenever we suspect that the system chipset is re-
4618 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4619 * is bogus tx completions. We try to recover by setting the
4620 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4621 * in the workqueue.
4623 static void tg3_tx_recover(struct tg3 *tp)
4625 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4626 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4628 netdev_warn(tp->dev,
4629 "The system may be re-ordering memory-mapped I/O "
4630 "cycles to the network device, attempting to recover. "
4631 "Please report the problem to the driver maintainer "
4632 "and include system chipset information.\n");
4634 spin_lock(&tp->lock);
4635 tg3_flag_set(tp, TX_RECOVERY_PENDING);
4636 spin_unlock(&tp->lock);
4639 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4641 /* Tell compiler to fetch tx indices from memory. */
4642 barrier();
4643 return tnapi->tx_pending -
4644 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
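/* Worked example (editor's note, assuming TG3_TX_RING_SIZE == 512):
 * with tx_prod == 5 and tx_cons == 510, the mask arithmetic gives
 * (5 - 510) & 511 == 7 descriptors in flight, so tx_pending - 7 slots
 * remain; producer wraparound needs no conditional.
 */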
4647 /* Tigon3 never reports partial packet sends. So we do not
4648 * need special logic to handle SKBs that have not had all
4649 * of their frags sent yet, like SunGEM does.
4651 static void tg3_tx(struct tg3_napi *tnapi)
4653 struct tg3 *tp = tnapi->tp;
4654 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4655 u32 sw_idx = tnapi->tx_cons;
4656 struct netdev_queue *txq;
4657 int index = tnapi - tp->napi;
4659 if (tg3_flag(tp, ENABLE_TSS))
4660 index--;
4662 txq = netdev_get_tx_queue(tp->dev, index);
4664 while (sw_idx != hw_idx) {
4665 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4666 struct sk_buff *skb = ri->skb;
4667 int i, tx_bug = 0;
4669 if (unlikely(skb == NULL)) {
4670 tg3_tx_recover(tp);
4671 return;
4674 pci_unmap_single(tp->pdev,
4675 dma_unmap_addr(ri, mapping),
4676 skb_headlen(skb),
4677 PCI_DMA_TODEVICE);
4679 ri->skb = NULL;
4681 sw_idx = NEXT_TX(sw_idx);
4683 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4684 ri = &tnapi->tx_buffers[sw_idx];
4685 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4686 tx_bug = 1;
4688 pci_unmap_page(tp->pdev,
4689 dma_unmap_addr(ri, mapping),
4690 skb_shinfo(skb)->frags[i].size,
4691 PCI_DMA_TODEVICE);
4692 sw_idx = NEXT_TX(sw_idx);
4695 dev_kfree_skb(skb);
4697 if (unlikely(tx_bug)) {
4698 tg3_tx_recover(tp);
4699 return;
4703 tnapi->tx_cons = sw_idx;
4705 /* Need to make the tx_cons update visible to tg3_start_xmit()
4706 * before checking for netif_queue_stopped(). Without the
4707 * memory barrier, there is a small possibility that tg3_start_xmit()
4708 * will miss it and cause the queue to be stopped forever.
4710 smp_mb();
4712 if (unlikely(netif_tx_queue_stopped(txq) &&
4713 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4714 __netif_tx_lock(txq, smp_processor_id());
4715 if (netif_tx_queue_stopped(txq) &&
4716 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4717 netif_tx_wake_queue(txq);
4718 __netif_tx_unlock(txq);
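/* Editor's note: the queue state is re-checked under the tx lock above
 * because tg3_start_xmit() can stop the queue between the lockless
 * check and the wake; re-checking prevents waking a queue that was
 * just stopped for lack of space.
 */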
4722 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4724 if (!ri->skb)
4725 return;
4727 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4728 map_sz, PCI_DMA_FROMDEVICE);
4729 dev_kfree_skb_any(ri->skb);
4730 ri->skb = NULL;
4733 /* Returns size of skb allocated or < 0 on error.
4735 * We only need to fill in the address because the other members
4736 * of the RX descriptor are invariant, see tg3_init_rings.
4738 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4739 * posting buffers we only dirty the first cache line of the RX
4740 * descriptor (containing the address). Whereas for the RX status
4741 * buffers the cpu only reads the last cacheline of the RX descriptor
4742 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4744 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4745 u32 opaque_key, u32 dest_idx_unmasked)
4747 struct tg3_rx_buffer_desc *desc;
4748 struct ring_info *map;
4749 struct sk_buff *skb;
4750 dma_addr_t mapping;
4751 int skb_size, dest_idx;
4753 switch (opaque_key) {
4754 case RXD_OPAQUE_RING_STD:
4755 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4756 desc = &tpr->rx_std[dest_idx];
4757 map = &tpr->rx_std_buffers[dest_idx];
4758 skb_size = tp->rx_pkt_map_sz;
4759 break;
4761 case RXD_OPAQUE_RING_JUMBO:
4762 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4763 desc = &tpr->rx_jmb[dest_idx].std;
4764 map = &tpr->rx_jmb_buffers[dest_idx];
4765 skb_size = TG3_RX_JMB_MAP_SZ;
4766 break;
4768 default:
4769 return -EINVAL;
4772 /* Do not overwrite any of the map or rp information
4773 * until we are sure we can commit to a new buffer.
4775 * Callers depend upon this behavior and assume that
4776 * we leave everything unchanged if we fail.
4778 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4779 if (skb == NULL)
4780 return -ENOMEM;
4782 skb_reserve(skb, tp->rx_offset);
4784 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4785 PCI_DMA_FROMDEVICE);
4786 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4787 dev_kfree_skb(skb);
4788 return -EIO;
4791 map->skb = skb;
4792 dma_unmap_addr_set(map, mapping, mapping);
4794 desc->addr_hi = ((u64)mapping >> 32);
4795 desc->addr_lo = ((u64)mapping & 0xffffffff);
4797 return skb_size;
4800 /* We only need to copy over the address because the other
4801 * members of the RX descriptor are invariant. See notes above
4802 * tg3_alloc_rx_skb for full details.
4804 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4805 struct tg3_rx_prodring_set *dpr,
4806 u32 opaque_key, int src_idx,
4807 u32 dest_idx_unmasked)
4809 struct tg3 *tp = tnapi->tp;
4810 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4811 struct ring_info *src_map, *dest_map;
4812 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4813 int dest_idx;
4815 switch (opaque_key) {
4816 case RXD_OPAQUE_RING_STD:
4817 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4818 dest_desc = &dpr->rx_std[dest_idx];
4819 dest_map = &dpr->rx_std_buffers[dest_idx];
4820 src_desc = &spr->rx_std[src_idx];
4821 src_map = &spr->rx_std_buffers[src_idx];
4822 break;
4824 case RXD_OPAQUE_RING_JUMBO:
4825 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4826 dest_desc = &dpr->rx_jmb[dest_idx].std;
4827 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4828 src_desc = &spr->rx_jmb[src_idx].std;
4829 src_map = &spr->rx_jmb_buffers[src_idx];
4830 break;
4832 default:
4833 return;
4836 dest_map->skb = src_map->skb;
4837 dma_unmap_addr_set(dest_map, mapping,
4838 dma_unmap_addr(src_map, mapping));
4839 dest_desc->addr_hi = src_desc->addr_hi;
4840 dest_desc->addr_lo = src_desc->addr_lo;
4842 /* Ensure that the update to the skb happens after the physical
4843 * addresses have been transferred to the new BD location.
4845 smp_wmb();
4847 src_map->skb = NULL;
4850 /* The RX ring scheme is composed of multiple rings which post fresh
4851 * buffers to the chip, and one special ring the chip uses to report
4852 * status back to the host.
4854 * The special ring reports the status of received packets to the
4855 * host. The chip does not write into the original descriptor the
4856 * RX buffer was obtained from. The chip simply takes the original
4857 * descriptor as provided by the host, updates the status and length
4858 * field, then writes this into the next status ring entry.
4860 * Each ring the host uses to post buffers to the chip is described
4861 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4862 * it is first placed into the on-chip ram. When the packet's length
4863 * is known, it walks down the TG3_BDINFO entries to select the ring.
4864 * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
4865 * whose MAXLEN covers the new packet's length is chosen.
4867 * The "separate ring for rx status" scheme may sound queer, but it makes
4868 * sense from a cache coherency perspective. If only the host writes
4869 * to the buffer post rings, and only the chip writes to the rx status
4870 * rings, then cache lines never move beyond shared-modified state.
4871 * If both the host and chip were to write into the same ring, cache line
4872 * eviction could occur since both entities want it in an exclusive state.
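/* Editor's sketch of the scheme described above:
 *
 *   host --(posts fresh buffers)--> std/jumbo producer rings --> chip
 *   chip --(status + length writes)--> rx return ring --> host
 *
 * Each ring has exactly one writer, so its cache lines are never
 * contended for exclusive ownership by both sides.
 */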
4874 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4876 struct tg3 *tp = tnapi->tp;
4877 u32 work_mask, rx_std_posted = 0;
4878 u32 std_prod_idx, jmb_prod_idx;
4879 u32 sw_idx = tnapi->rx_rcb_ptr;
4880 u16 hw_idx;
4881 int received;
4882 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4884 hw_idx = *(tnapi->rx_rcb_prod_idx);
4886 * We need to order the read of hw_idx and the read of
4887 * the opaque cookie.
4889 rmb();
4890 work_mask = 0;
4891 received = 0;
4892 std_prod_idx = tpr->rx_std_prod_idx;
4893 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4894 while (sw_idx != hw_idx && budget > 0) {
4895 struct ring_info *ri;
4896 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4897 unsigned int len;
4898 struct sk_buff *skb;
4899 dma_addr_t dma_addr;
4900 u32 opaque_key, desc_idx, *post_ptr;
4902 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4903 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4904 if (opaque_key == RXD_OPAQUE_RING_STD) {
4905 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4906 dma_addr = dma_unmap_addr(ri, mapping);
4907 skb = ri->skb;
4908 post_ptr = &std_prod_idx;
4909 rx_std_posted++;
4910 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4911 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4912 dma_addr = dma_unmap_addr(ri, mapping);
4913 skb = ri->skb;
4914 post_ptr = &jmb_prod_idx;
4915 } else
4916 goto next_pkt_nopost;
4918 work_mask |= opaque_key;
4920 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4921 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4922 drop_it:
4923 tg3_recycle_rx(tnapi, tpr, opaque_key,
4924 desc_idx, *post_ptr);
4925 drop_it_no_recycle:
4926 /* Other statistics are kept track of by the card. */
4927 tp->rx_dropped++;
4928 goto next_pkt;
4931 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4932 ETH_FCS_LEN;
4934 if (len > TG3_RX_COPY_THRESH(tp)) {
4935 int skb_size;
4937 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4938 *post_ptr);
4939 if (skb_size < 0)
4940 goto drop_it;
4942 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4943 PCI_DMA_FROMDEVICE);
4945 /* Ensure that the update to the skb happens
4946 * after the usage of the old DMA mapping.
4948 smp_wmb();
4950 ri->skb = NULL;
4952 skb_put(skb, len);
4953 } else {
4954 struct sk_buff *copy_skb;
4956 tg3_recycle_rx(tnapi, tpr, opaque_key,
4957 desc_idx, *post_ptr);
4959 copy_skb = netdev_alloc_skb(tp->dev, len +
4960 TG3_RAW_IP_ALIGN);
4961 if (copy_skb == NULL)
4962 goto drop_it_no_recycle;
4964 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4965 skb_put(copy_skb, len);
4966 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4967 skb_copy_from_linear_data(skb, copy_skb->data, len);
4968 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4970 /* We'll reuse the original ring buffer. */
4971 skb = copy_skb;
4974 if ((tp->dev->features & NETIF_F_RXCSUM) &&
4975 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4976 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4977 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4978 skb->ip_summed = CHECKSUM_UNNECESSARY;
4979 else
4980 skb_checksum_none_assert(skb);
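/* Editor's reading of the check above: a hardware TCP/UDP checksum
 * field of 0xffff indicates the packet verified clean, so
 * CHECKSUM_UNNECESSARY lets the stack skip its own verification;
 * anything else falls back to software checksumming.
 */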
4982 skb->protocol = eth_type_trans(skb, tp->dev);
4984 if (len > (tp->dev->mtu + ETH_HLEN) &&
4985 skb->protocol != htons(ETH_P_8021Q)) {
4986 dev_kfree_skb(skb);
4987 goto drop_it_no_recycle;
4990 if (desc->type_flags & RXD_FLAG_VLAN &&
4991 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
4992 __vlan_hwaccel_put_tag(skb,
4993 desc->err_vlan & RXD_VLAN_MASK);
4995 napi_gro_receive(&tnapi->napi, skb);
4997 received++;
4998 budget--;
5000 next_pkt:
5001 (*post_ptr)++;
5003 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5004 tpr->rx_std_prod_idx = std_prod_idx &
5005 tp->rx_std_ring_mask;
5006 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5007 tpr->rx_std_prod_idx);
5008 work_mask &= ~RXD_OPAQUE_RING_STD;
5009 rx_std_posted = 0;
5011 next_pkt_nopost:
5012 sw_idx++;
5013 sw_idx &= tp->rx_ret_ring_mask;
5015 /* Refresh hw_idx to see if there is new work */
5016 if (sw_idx == hw_idx) {
5017 hw_idx = *(tnapi->rx_rcb_prod_idx);
5018 rmb();
5022 /* ACK the status ring. */
5023 tnapi->rx_rcb_ptr = sw_idx;
5024 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5026 /* Refill RX ring(s). */
5027 if (!tg3_flag(tp, ENABLE_RSS)) {
5028 if (work_mask & RXD_OPAQUE_RING_STD) {
5029 tpr->rx_std_prod_idx = std_prod_idx &
5030 tp->rx_std_ring_mask;
5031 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5032 tpr->rx_std_prod_idx);
5034 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5035 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5036 tp->rx_jmb_ring_mask;
5037 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5038 tpr->rx_jmb_prod_idx);
5040 mmiowb();
5041 } else if (work_mask) {
5042 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5043 * updated before the producer indices can be updated.
5045 smp_wmb();
5047 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5048 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5050 if (tnapi != &tp->napi[1])
5051 napi_schedule(&tp->napi[1].napi);
5054 return received;
5057 static void tg3_poll_link(struct tg3 *tp)
5059 /* handle link change and other phy events */
5060 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5061 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5063 if (sblk->status & SD_STATUS_LINK_CHG) {
5064 sblk->status = SD_STATUS_UPDATED |
5065 (sblk->status & ~SD_STATUS_LINK_CHG);
5066 spin_lock(&tp->lock);
5067 if (tg3_flag(tp, USE_PHYLIB)) {
5068 tw32_f(MAC_STATUS,
5069 (MAC_STATUS_SYNC_CHANGED |
5070 MAC_STATUS_CFG_CHANGED |
5071 MAC_STATUS_MI_COMPLETION |
5072 MAC_STATUS_LNKSTATE_CHANGED));
5073 udelay(40);
5074 } else
5075 tg3_setup_phy(tp, 0);
5076 spin_unlock(&tp->lock);
5081 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5082 struct tg3_rx_prodring_set *dpr,
5083 struct tg3_rx_prodring_set *spr)
5085 u32 si, di, cpycnt, src_prod_idx;
5086 int i, err = 0;
5088 while (1) {
5089 src_prod_idx = spr->rx_std_prod_idx;
5091 /* Make sure updates to the rx_std_buffers[] entries and the
5092 * standard producer index are seen in the correct order.
5094 smp_rmb();
5096 if (spr->rx_std_cons_idx == src_prod_idx)
5097 break;
5099 if (spr->rx_std_cons_idx < src_prod_idx)
5100 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5101 else
5102 cpycnt = tp->rx_std_ring_mask + 1 -
5103 spr->rx_std_cons_idx;
5105 cpycnt = min(cpycnt,
5106 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
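/* Worked example (editor's note): with rx_std_ring_mask == 1023,
 * cons == 1020 and prod == 4, the copy wraps, so only the 4 entries
 * up to the end of the ring are moved this pass; the remainder is
 * handled on the next loop iteration.
 */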
5108 si = spr->rx_std_cons_idx;
5109 di = dpr->rx_std_prod_idx;
5111 for (i = di; i < di + cpycnt; i++) {
5112 if (dpr->rx_std_buffers[i].skb) {
5113 cpycnt = i - di;
5114 err = -ENOSPC;
5115 break;
5119 if (!cpycnt)
5120 break;
5122 /* Ensure that updates to the rx_std_buffers ring and the
5123 * shadowed hardware producer ring from tg3_recycle_skb() are
5124 * ordered correctly WRT the skb check above.
5126 smp_rmb();
5128 memcpy(&dpr->rx_std_buffers[di],
5129 &spr->rx_std_buffers[si],
5130 cpycnt * sizeof(struct ring_info));
5132 for (i = 0; i < cpycnt; i++, di++, si++) {
5133 struct tg3_rx_buffer_desc *sbd, *dbd;
5134 sbd = &spr->rx_std[si];
5135 dbd = &dpr->rx_std[di];
5136 dbd->addr_hi = sbd->addr_hi;
5137 dbd->addr_lo = sbd->addr_lo;
5140 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5141 tp->rx_std_ring_mask;
5142 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5143 tp->rx_std_ring_mask;
5146 while (1) {
5147 src_prod_idx = spr->rx_jmb_prod_idx;
5149 /* Make sure updates to the rx_jmb_buffers[] entries and
5150 * the jumbo producer index are seen in the correct order.
5152 smp_rmb();
5154 if (spr->rx_jmb_cons_idx == src_prod_idx)
5155 break;
5157 if (spr->rx_jmb_cons_idx < src_prod_idx)
5158 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5159 else
5160 cpycnt = tp->rx_jmb_ring_mask + 1 -
5161 spr->rx_jmb_cons_idx;
5163 cpycnt = min(cpycnt,
5164 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5166 si = spr->rx_jmb_cons_idx;
5167 di = dpr->rx_jmb_prod_idx;
5169 for (i = di; i < di + cpycnt; i++) {
5170 if (dpr->rx_jmb_buffers[i].skb) {
5171 cpycnt = i - di;
5172 err = -ENOSPC;
5173 break;
5177 if (!cpycnt)
5178 break;
5180 /* Ensure that updates to the rx_jmb_buffers ring and the
5181 * shadowed hardware producer ring from tg3_recycle_skb() are
5182 * ordered correctly WRT the skb check above.
5184 smp_rmb();
5186 memcpy(&dpr->rx_jmb_buffers[di],
5187 &spr->rx_jmb_buffers[si],
5188 cpycnt * sizeof(struct ring_info));
5190 for (i = 0; i < cpycnt; i++, di++, si++) {
5191 struct tg3_rx_buffer_desc *sbd, *dbd;
5192 sbd = &spr->rx_jmb[si].std;
5193 dbd = &dpr->rx_jmb[di].std;
5194 dbd->addr_hi = sbd->addr_hi;
5195 dbd->addr_lo = sbd->addr_lo;
5198 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5199 tp->rx_jmb_ring_mask;
5200 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5201 tp->rx_jmb_ring_mask;
5204 return err;
5207 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5209 struct tg3 *tp = tnapi->tp;
5211 /* run TX completion thread */
5212 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5213 tg3_tx(tnapi);
5214 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5215 return work_done;
5218 /* run RX thread, within the bounds set by NAPI.
5219 * All RX "locking" is done by ensuring outside
5220 * code synchronizes with tg3->napi.poll()
5222 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5223 work_done += tg3_rx(tnapi, budget - work_done);
5225 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5226 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5227 int i, err = 0;
5228 u32 std_prod_idx = dpr->rx_std_prod_idx;
5229 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5231 for (i = 1; i < tp->irq_cnt; i++)
5232 err |= tg3_rx_prodring_xfer(tp, dpr,
5233 &tp->napi[i].prodring);
5235 wmb();
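/* Editor's note: the wmb() above makes the transferred producer ring
 * entries visible before the mailbox writes below hand them to the
 * hardware.
 */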
5237 if (std_prod_idx != dpr->rx_std_prod_idx)
5238 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5239 dpr->rx_std_prod_idx);
5241 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5242 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5243 dpr->rx_jmb_prod_idx);
5245 mmiowb();
5247 if (err)
5248 tw32_f(HOSTCC_MODE, tp->coal_now);
5251 return work_done;
5254 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5256 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5257 struct tg3 *tp = tnapi->tp;
5258 int work_done = 0;
5259 struct tg3_hw_status *sblk = tnapi->hw_status;
5261 while (1) {
5262 work_done = tg3_poll_work(tnapi, work_done, budget);
5264 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5265 goto tx_recovery;
5267 if (unlikely(work_done >= budget))
5268 break;
5270 /* tp->last_tag is used in tg3_int_reenable() below
5271 * to tell the hw how much work has been processed,
5272 * so we must read it before checking for more work.
5274 tnapi->last_tag = sblk->status_tag;
5275 tnapi->last_irq_tag = tnapi->last_tag;
5276 rmb();
5278 /* check for RX/TX work to do */
5279 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5280 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5281 napi_complete(napi);
5282 /* Reenable interrupts. */
5283 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5284 mmiowb();
5285 break;
5289 return work_done;
5291 tx_recovery:
5292 /* work_done is guaranteed to be less than budget. */
5293 napi_complete(napi);
5294 schedule_work(&tp->reset_task);
5295 return work_done;
5298 static void tg3_process_error(struct tg3 *tp)
5300 u32 val;
5301 bool real_error = false;
5303 if (tg3_flag(tp, ERROR_PROCESSED))
5304 return;
5306 /* Check Flow Attention register */
5307 val = tr32(HOSTCC_FLOW_ATTN);
5308 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5309 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
5310 real_error = true;
5313 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5314 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
5315 real_error = true;
5318 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5319 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
5320 real_error = true;
5323 if (!real_error)
5324 return;
5326 tg3_dump_state(tp);
5328 tg3_flag_set(tp, ERROR_PROCESSED);
5329 schedule_work(&tp->reset_task);
5332 static int tg3_poll(struct napi_struct *napi, int budget)
5334 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5335 struct tg3 *tp = tnapi->tp;
5336 int work_done = 0;
5337 struct tg3_hw_status *sblk = tnapi->hw_status;
5339 while (1) {
5340 if (sblk->status & SD_STATUS_ERROR)
5341 tg3_process_error(tp);
5343 tg3_poll_link(tp);
5345 work_done = tg3_poll_work(tnapi, work_done, budget);
5347 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5348 goto tx_recovery;
5350 if (unlikely(work_done >= budget))
5351 break;
5353 if (tg3_flag(tp, TAGGED_STATUS)) {
5354 /* tp->last_tag is used in tg3_int_reenable() below
5355 * to tell the hw how much work has been processed,
5356 * so we must read it before checking for more work.
5358 tnapi->last_tag = sblk->status_tag;
5359 tnapi->last_irq_tag = tnapi->last_tag;
5360 rmb();
5361 } else
5362 sblk->status &= ~SD_STATUS_UPDATED;
5364 if (likely(!tg3_has_work(tnapi))) {
5365 napi_complete(napi);
5366 tg3_int_reenable(tnapi);
5367 break;
5371 return work_done;
5373 tx_recovery:
5374 /* work_done is guaranteed to be less than budget. */
5375 napi_complete(napi);
5376 schedule_work(&tp->reset_task);
5377 return work_done;
5380 static void tg3_napi_disable(struct tg3 *tp)
5382 int i;
5384 for (i = tp->irq_cnt - 1; i >= 0; i--)
5385 napi_disable(&tp->napi[i].napi);
5388 static void tg3_napi_enable(struct tg3 *tp)
5390 int i;
5392 for (i = 0; i < tp->irq_cnt; i++)
5393 napi_enable(&tp->napi[i].napi);
5396 static void tg3_napi_init(struct tg3 *tp)
5398 int i;
5400 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5401 for (i = 1; i < tp->irq_cnt; i++)
5402 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5405 static void tg3_napi_fini(struct tg3 *tp)
5407 int i;
5409 for (i = 0; i < tp->irq_cnt; i++)
5410 netif_napi_del(&tp->napi[i].napi);
5413 static inline void tg3_netif_stop(struct tg3 *tp)
5415 tp->dev->trans_start = jiffies; /* prevent tx timeout */
5416 tg3_napi_disable(tp);
5417 netif_tx_disable(tp->dev);
5420 static inline void tg3_netif_start(struct tg3 *tp)
5422 /* NOTE: unconditional netif_tx_wake_all_queues is only
5423 * appropriate so long as all callers are assured to
5424 * have free tx slots (such as after tg3_init_hw)
5426 netif_tx_wake_all_queues(tp->dev);
5428 tg3_napi_enable(tp);
5429 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5430 tg3_enable_ints(tp);
5433 static void tg3_irq_quiesce(struct tg3 *tp)
5435 int i;
5437 BUG_ON(tp->irq_sync);
5439 tp->irq_sync = 1;
5440 smp_mb();
5442 for (i = 0; i < tp->irq_cnt; i++)
5443 synchronize_irq(tp->napi[i].irq_vec);
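/* Editor's note: once irq_sync is visible and every vector's handler
 * has drained, tg3_irq_sync() makes subsequent handler invocations
 * bail out early, so no new NAPI work is scheduled until irq_sync is
 * cleared again.
 */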
5446 /* Fully shut down all tg3 driver activity elsewhere in the system.
5447 * If irq_sync is non-zero, the IRQ handlers are quiesced as well.
5448 * Most of the time this is not necessary, except when shutting
5449 * down the device.
5451 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5453 spin_lock_bh(&tp->lock);
5454 if (irq_sync)
5455 tg3_irq_quiesce(tp);
5458 static inline void tg3_full_unlock(struct tg3 *tp)
5460 spin_unlock_bh(&tp->lock);
5463 /* One-shot MSI handler - the chip automatically disables the interrupt
5464 * after sending the MSI, so the driver doesn't have to do it.
5466 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5468 struct tg3_napi *tnapi = dev_id;
5469 struct tg3 *tp = tnapi->tp;
5471 prefetch(tnapi->hw_status);
5472 if (tnapi->rx_rcb)
5473 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5475 if (likely(!tg3_irq_sync(tp)))
5476 napi_schedule(&tnapi->napi);
5478 return IRQ_HANDLED;
5481 /* MSI ISR - No need to check for interrupt sharing and no need to
5482 * flush status block and interrupt mailbox. PCI ordering rules
5483 * guarantee that MSI will arrive after the status block.
5485 static irqreturn_t tg3_msi(int irq, void *dev_id)
5487 struct tg3_napi *tnapi = dev_id;
5488 struct tg3 *tp = tnapi->tp;
5490 prefetch(tnapi->hw_status);
5491 if (tnapi->rx_rcb)
5492 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5494 * Writing any value to intr-mbox-0 clears PCI INTA# and
5495 * chip-internal interrupt pending events.
5496 * Writing non-zero to intr-mbox-0 additionally tells the
5497 * NIC to stop sending us irqs, engaging "in-intr-handler"
5498 * event coalescing.
5500 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5501 if (likely(!tg3_irq_sync(tp)))
5502 napi_schedule(&tnapi->napi);
5504 return IRQ_RETVAL(1);
5507 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5509 struct tg3_napi *tnapi = dev_id;
5510 struct tg3 *tp = tnapi->tp;
5511 struct tg3_hw_status *sblk = tnapi->hw_status;
5512 unsigned int handled = 1;
5514 /* In INTx mode, it is possible for the interrupt to arrive at
5515 * the CPU before the status block posted prior to the interrupt
5516 * has become visible. Reading the PCI State register will confirm
5517 * whether the interrupt is ours and will flush the status block.
5519 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5520 if (tg3_flag(tp, CHIP_RESETTING) ||
5521 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5522 handled = 0;
5523 goto out;
5528 * Writing any value to intr-mbox-0 clears PCI INTA# and
5529 * chip-internal interrupt pending events.
5530 * Writing non-zero to intr-mbox-0 additionally tells the
5531 * NIC to stop sending us irqs, engaging "in-intr-handler"
5532 * event coalescing.
5534 * Flush the mailbox to de-assert the IRQ immediately to prevent
5535 * spurious interrupts. The flush impacts performance but
5536 * excessive spurious interrupts can be worse in some cases.
5538 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5539 if (tg3_irq_sync(tp))
5540 goto out;
5541 sblk->status &= ~SD_STATUS_UPDATED;
5542 if (likely(tg3_has_work(tnapi))) {
5543 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5544 napi_schedule(&tnapi->napi);
5545 } else {
5546 /* No work, shared interrupt perhaps? re-enable
5547 * interrupts, and flush that PCI write
5549 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5550 0x00000000);
5552 out:
5553 return IRQ_RETVAL(handled);
5556 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5558 struct tg3_napi *tnapi = dev_id;
5559 struct tg3 *tp = tnapi->tp;
5560 struct tg3_hw_status *sblk = tnapi->hw_status;
5561 unsigned int handled = 1;
5563 /* In INTx mode, it is possible for the interrupt to arrive at
5564 * the CPU before the status block posted prior to the interrupt
5565 * has become visible. Reading the PCI State register will confirm
5566 * whether the interrupt is ours and will flush the status block.
5568 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5569 if (tg3_flag(tp, CHIP_RESETTING) ||
5570 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5571 handled = 0;
5572 goto out;
5577 * Writing any value to intr-mbox-0 clears PCI INTA# and
5578 * chip-internal interrupt pending events.
5579 * Writing non-zero to intr-mbox-0 additionally tells the
5580 * NIC to stop sending us irqs, engaging "in-intr-handler"
5581 * event coalescing.
5583 * Flush the mailbox to de-assert the IRQ immediately to prevent
5584 * spurious interrupts. The flush impacts performance but
5585 * excessive spurious interrupts can be worse in some cases.
5587 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5590 * In a shared interrupt configuration, sometimes other devices'
5591 * interrupts will scream. We record the current status tag here
5592 * so that the above check can report that the screaming interrupts
5593 * are unhandled. Eventually they will be silenced.
5595 tnapi->last_irq_tag = sblk->status_tag;
5597 if (tg3_irq_sync(tp))
5598 goto out;
5600 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5602 napi_schedule(&tnapi->napi);
5604 out:
5605 return IRQ_RETVAL(handled);
5608 /* ISR for interrupt test */
5609 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5611 struct tg3_napi *tnapi = dev_id;
5612 struct tg3 *tp = tnapi->tp;
5613 struct tg3_hw_status *sblk = tnapi->hw_status;
5615 if ((sblk->status & SD_STATUS_UPDATED) ||
5616 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5617 tg3_disable_ints(tp);
5618 return IRQ_RETVAL(1);
5620 return IRQ_RETVAL(0);
5623 static int tg3_init_hw(struct tg3 *, int);
5624 static int tg3_halt(struct tg3 *, int, int);
5626 /* Restart hardware after configuration changes, self-test, etc.
5627 * Invoked with tp->lock held.
5629 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5630 __releases(tp->lock)
5631 __acquires(tp->lock)
5633 int err;
5635 err = tg3_init_hw(tp, reset_phy);
5636 if (err) {
5637 netdev_err(tp->dev,
5638 "Failed to re-initialize device, aborting\n");
5639 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5640 tg3_full_unlock(tp);
5641 del_timer_sync(&tp->timer);
5642 tp->irq_sync = 0;
5643 tg3_napi_enable(tp);
5644 dev_close(tp->dev);
5645 tg3_full_lock(tp, 0);
5647 return err;
5650 #ifdef CONFIG_NET_POLL_CONTROLLER
5651 static void tg3_poll_controller(struct net_device *dev)
5653 int i;
5654 struct tg3 *tp = netdev_priv(dev);
5656 for (i = 0; i < tp->irq_cnt; i++)
5657 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5659 #endif
5661 static void tg3_reset_task(struct work_struct *work)
5663 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5664 int err;
5665 unsigned int restart_timer;
5667 tg3_full_lock(tp, 0);
5669 if (!netif_running(tp->dev)) {
5670 tg3_full_unlock(tp);
5671 return;
5674 tg3_full_unlock(tp);
5676 tg3_phy_stop(tp);
5678 tg3_netif_stop(tp);
5680 tg3_full_lock(tp, 1);
5682 restart_timer = tg3_flag(tp, RESTART_TIMER);
5683 tg3_flag_clear(tp, RESTART_TIMER);
5685 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5686 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5687 tp->write32_rx_mbox = tg3_write_flush_reg32;
5688 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5689 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5692 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5693 err = tg3_init_hw(tp, 1);
5694 if (err)
5695 goto out;
5697 tg3_netif_start(tp);
5699 if (restart_timer)
5700 mod_timer(&tp->timer, jiffies + 1);
5702 out:
5703 tg3_full_unlock(tp);
5705 if (!err)
5706 tg3_phy_start(tp);
5709 static void tg3_tx_timeout(struct net_device *dev)
5711 struct tg3 *tp = netdev_priv(dev);
5713 if (netif_msg_tx_err(tp)) {
5714 netdev_err(dev, "transmit timed out, resetting\n");
5715 tg3_dump_state(tp);
5718 schedule_work(&tp->reset_task);
5721 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5722 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5724 u32 base = (u32) mapping & 0xffffffff;
5726 return (base > 0xffffdcc0) && (base + len + 8 < base);
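/* Worked example (editor's note): base == 0xffffff00, len == 0x200:
 * base + len + 8 wraps to 0x108 in 32-bit arithmetic, which is less
 * than base, so the buffer straddles a 4GB boundary and the hwbug
 * workaround path is taken.
 */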
5729 /* Test for DMA addresses > 40-bit */
5730 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5731 int len)
5733 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5734 if (tg3_flag(tp, 40BIT_DMA_BUG))
5735 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5736 return 0;
5737 #else
5738 return 0;
5739 #endif
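/* Worked example (editor's note): DMA_BIT_MASK(40) is 0xff_ffff_ffff;
 * a mapping at 0xff_ffff_ff00 with len == 0x200 sums past that limit,
 * so the test fires and the packet is bounced through
 * tigon3_dma_hwbug_workaround().
 */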
5742 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5743 dma_addr_t mapping, int len, u32 flags,
5744 u32 mss_and_is_end)
5746 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5747 int is_end = (mss_and_is_end & 0x1);
5748 u32 mss = (mss_and_is_end >> 1);
5749 u32 vlan_tag = 0;
5751 if (is_end)
5752 flags |= TXD_FLAG_END;
5753 if (flags & TXD_FLAG_VLAN) {
5754 vlan_tag = flags >> 16;
5755 flags &= 0xffff;
5757 vlan_tag |= (mss << TXD_MSS_SHIFT);
5759 txd->addr_hi = ((u64) mapping >> 32);
5760 txd->addr_lo = ((u64) mapping & 0xffffffff);
5761 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5762 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
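/* Editor's note: callers pack mss_and_is_end as (is_last | (mss << 1));
 * bit 0 selects TXD_FLAG_END and the upper bits carry the MSS, e.g.
 * tg3_set_txd(tnapi, entry, mapping, len, base_flags,
 *             (i == last) | (mss << 1)) in tg3_start_xmit() below.
 */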
5765 static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
5766 struct sk_buff *skb, int last)
5768 int i;
5769 u32 entry = tnapi->tx_prod;
5770 struct ring_info *txb = &tnapi->tx_buffers[entry];
5772 pci_unmap_single(tnapi->tp->pdev,
5773 dma_unmap_addr(txb, mapping),
5774 skb_headlen(skb),
5775 PCI_DMA_TODEVICE);
5776 for (i = 0; i <= last; i++) {
5777 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5779 entry = NEXT_TX(entry);
5780 txb = &tnapi->tx_buffers[entry];
5782 pci_unmap_page(tnapi->tp->pdev,
5783 dma_unmap_addr(txb, mapping),
5784 frag->size, PCI_DMA_TODEVICE);
5788 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5789 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5790 struct sk_buff *skb,
5791 u32 base_flags, u32 mss)
5793 struct tg3 *tp = tnapi->tp;
5794 struct sk_buff *new_skb;
5795 dma_addr_t new_addr = 0;
5796 u32 entry = tnapi->tx_prod;
5797 int ret = 0;
5799 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5800 new_skb = skb_copy(skb, GFP_ATOMIC);
5801 else {
5802 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5804 new_skb = skb_copy_expand(skb,
5805 skb_headroom(skb) + more_headroom,
5806 skb_tailroom(skb), GFP_ATOMIC);
5809 if (!new_skb) {
5810 ret = -1;
5811 } else {
5812 /* New SKB is guaranteed to be linear. */
5813 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5814 PCI_DMA_TODEVICE);
5815 /* Make sure the mapping succeeded */
5816 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5817 ret = -1;
5818 dev_kfree_skb(new_skb);
5820 /* Make sure new skb does not cross any 4G boundaries.
5821 * Drop the packet if it does.
5823 } else if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
5824 tg3_4g_overflow_test(new_addr, new_skb->len)) {
5825 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5826 PCI_DMA_TODEVICE);
5827 ret = -1;
5828 dev_kfree_skb(new_skb);
5829 } else {
5830 tnapi->tx_buffers[entry].skb = new_skb;
5831 dma_unmap_addr_set(&tnapi->tx_buffers[entry],
5832 mapping, new_addr);
5834 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5835 base_flags, 1 | (mss << 1));
5839 dev_kfree_skb(skb);
5841 return ret;
5844 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
5846 /* Use GSO to work around a rare TSO bug that may be triggered when the
5847 * TSO header is greater than 80 bytes.
5849 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5851 struct sk_buff *segs, *nskb;
5852 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5854 /* Estimate the number of fragments in the worst case */
5855 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5856 netif_stop_queue(tp->dev);
5858 /* netif_tx_stop_queue() must be done before checking
5859 * the tx index in tg3_tx_avail() below, because in
5860 * tg3_tx(), we update tx index before checking for
5861 * netif_tx_queue_stopped().
5863 smp_mb();
5864 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5865 return NETDEV_TX_BUSY;
5867 netif_wake_queue(tp->dev);
5870 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5871 if (IS_ERR(segs))
5872 goto tg3_tso_bug_end;
5874 do {
5875 nskb = segs;
5876 segs = segs->next;
5877 nskb->next = NULL;
5878 tg3_start_xmit(nskb, tp->dev);
5879 } while (segs);
5881 tg3_tso_bug_end:
5882 dev_kfree_skb(skb);
5884 return NETDEV_TX_OK;
5887 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5888 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
5890 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
5892 struct tg3 *tp = netdev_priv(dev);
5893 u32 len, entry, base_flags, mss;
5894 int i = -1, would_hit_hwbug;
5895 dma_addr_t mapping;
5896 struct tg3_napi *tnapi;
5897 struct netdev_queue *txq;
5898 unsigned int last;
5900 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5901 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5902 if (tg3_flag(tp, ENABLE_TSS))
5903 tnapi++;
5905 /* We are running in BH disabled context with netif_tx_lock
5906 * and TX reclaim runs via tp->napi.poll inside of a software
5907 * interrupt. Furthermore, IRQ processing runs lockless so we have
5908 * no IRQ context deadlocks to worry about either. Rejoice!
5910 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5911 if (!netif_tx_queue_stopped(txq)) {
5912 netif_tx_stop_queue(txq);
5914 /* This is a hard error, log it. */
5915 netdev_err(dev,
5916 "BUG! Tx Ring full when queue awake!\n");
5918 return NETDEV_TX_BUSY;
5921 entry = tnapi->tx_prod;
5922 base_flags = 0;
5923 if (skb->ip_summed == CHECKSUM_PARTIAL)
5924 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5926 mss = skb_shinfo(skb)->gso_size;
5927 if (mss) {
5928 struct iphdr *iph;
5929 u32 tcp_opt_len, hdr_len;
5931 if (skb_header_cloned(skb) &&
5932 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5933 dev_kfree_skb(skb);
5934 goto out_unlock;
5937 iph = ip_hdr(skb);
5938 tcp_opt_len = tcp_optlen(skb);
5940 if (skb_is_gso_v6(skb)) {
5941 hdr_len = skb_headlen(skb) - ETH_HLEN;
5942 } else {
5943 u32 ip_tcp_len;
5945 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5946 hdr_len = ip_tcp_len + tcp_opt_len;
5948 iph->check = 0;
5949 iph->tot_len = htons(mss + hdr_len);
5952 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5953 tg3_flag(tp, TSO_BUG))
5954 return tg3_tso_bug(tp, skb);
5956 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5957 TXD_FLAG_CPU_POST_DMA);
5959 if (tg3_flag(tp, HW_TSO_1) ||
5960 tg3_flag(tp, HW_TSO_2) ||
5961 tg3_flag(tp, HW_TSO_3)) {
5962 tcp_hdr(skb)->check = 0;
5963 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5964 } else
5965 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5966 iph->daddr, 0,
5967 IPPROTO_TCP,
5970 if (tg3_flag(tp, HW_TSO_3)) {
5971 mss |= (hdr_len & 0xc) << 12;
5972 if (hdr_len & 0x10)
5973 base_flags |= 0x00000010;
5974 base_flags |= (hdr_len & 0x3e0) << 5;
5975 } else if (tg3_flag(tp, HW_TSO_2))
5976 mss |= hdr_len << 9;
5977 else if (tg3_flag(tp, HW_TSO_1) ||
5978 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5979 if (tcp_opt_len || iph->ihl > 5) {
5980 int tsflags;
5982 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5983 mss |= (tsflags << 11);
5985 } else {
5986 if (tcp_opt_len || iph->ihl > 5) {
5987 int tsflags;
5989 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5990 base_flags |= tsflags << 12;
5995 if (vlan_tx_tag_present(skb))
5996 base_flags |= (TXD_FLAG_VLAN |
5997 (vlan_tx_tag_get(skb) << 16));
5999 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6000 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6001 base_flags |= TXD_FLAG_JMB_PKT;
6003 len = skb_headlen(skb);
6005 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6006 if (pci_dma_mapping_error(tp->pdev, mapping)) {
6007 dev_kfree_skb(skb);
6008 goto out_unlock;
6011 tnapi->tx_buffers[entry].skb = skb;
6012 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6014 would_hit_hwbug = 0;
6016 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6017 would_hit_hwbug = 1;
6019 if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6020 tg3_4g_overflow_test(mapping, len))
6021 would_hit_hwbug = 1;
6023 if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6024 tg3_40bit_overflow_test(tp, mapping, len))
6025 would_hit_hwbug = 1;
6027 if (tg3_flag(tp, 5701_DMA_BUG))
6028 would_hit_hwbug = 1;
6030 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
6031 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
6033 entry = NEXT_TX(entry);
6035 /* Now loop through additional data fragments, and queue them. */
6036 if (skb_shinfo(skb)->nr_frags > 0) {
6037 last = skb_shinfo(skb)->nr_frags - 1;
6038 for (i = 0; i <= last; i++) {
6039 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6041 len = frag->size;
6042 mapping = pci_map_page(tp->pdev,
6043 frag->page,
6044 frag->page_offset,
6045 len, PCI_DMA_TODEVICE);
6047 tnapi->tx_buffers[entry].skb = NULL;
6048 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6049 mapping);
6050 if (pci_dma_mapping_error(tp->pdev, mapping))
6051 goto dma_error;
6053 if (tg3_flag(tp, SHORT_DMA_BUG) &&
6054 len <= 8)
6055 would_hit_hwbug = 1;
6057 if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6058 tg3_4g_overflow_test(mapping, len))
6059 would_hit_hwbug = 1;
6061 if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6062 tg3_40bit_overflow_test(tp, mapping, len))
6063 would_hit_hwbug = 1;
6065 if (tg3_flag(tp, HW_TSO_1) ||
6066 tg3_flag(tp, HW_TSO_2) ||
6067 tg3_flag(tp, HW_TSO_3))
6068 tg3_set_txd(tnapi, entry, mapping, len,
6069 base_flags, (i == last)|(mss << 1));
6070 else
6071 tg3_set_txd(tnapi, entry, mapping, len,
6072 base_flags, (i == last));
6074 entry = NEXT_TX(entry);
6078 if (would_hit_hwbug) {
6079 tg3_skb_error_unmap(tnapi, skb, i);
6081 /* If the workaround fails due to memory/mapping
6082 * failure, silently drop this packet.
6084 if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags, mss))
6085 goto out_unlock;
6087 entry = NEXT_TX(tnapi->tx_prod);
6090 /* Packets are ready, update Tx producer idx locally and on the card. */
6091 tw32_tx_mbox(tnapi->prodmbox, entry);
6093 tnapi->tx_prod = entry;
6094 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6095 netif_tx_stop_queue(txq);
6097 /* netif_tx_stop_queue() must be done before checking
6098 * the tx index in tg3_tx_avail() below, because in
6099 * tg3_tx(), we update tx index before checking for
6100 * netif_tx_queue_stopped().
6102 smp_mb();
6103 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6104 netif_tx_wake_queue(txq);
6107 out_unlock:
6108 mmiowb();
6110 return NETDEV_TX_OK;
6112 dma_error:
6113 tg3_skb_error_unmap(tnapi, skb, i);
6114 dev_kfree_skb(skb);
6115 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6116 return NETDEV_TX_OK;
6119 static void tg3_set_loopback(struct net_device *dev, u32 features)
6121 struct tg3 *tp = netdev_priv(dev);
6123 if (features & NETIF_F_LOOPBACK) {
6124 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6125 return;
6128 * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6129 * loopback mode if Half-Duplex mode was negotiated earlier.
6131 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6133 /* Enable internal MAC loopback mode */
6134 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6135 spin_lock_bh(&tp->lock);
6136 tw32(MAC_MODE, tp->mac_mode);
6137 netif_carrier_on(tp->dev);
6138 spin_unlock_bh(&tp->lock);
6139 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6140 } else {
6141 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6142 return;
6144 /* Disable internal MAC loopback mode */
6145 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6146 spin_lock_bh(&tp->lock);
6147 tw32(MAC_MODE, tp->mac_mode);
6148 /* Force link status check */
6149 tg3_setup_phy(tp, 1);
6150 spin_unlock_bh(&tp->lock);
6151 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6155 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6157 struct tg3 *tp = netdev_priv(dev);
6159 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6160 features &= ~NETIF_F_ALL_TSO;
6162 return features;
6165 static int tg3_set_features(struct net_device *dev, u32 features)
6167 u32 changed = dev->features ^ features;
6169 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6170 tg3_set_loopback(dev, features);
6172 return 0;
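/* Illustrative wiring only: hooks like the two above are registered
 * through the driver's net_device_ops elsewhere in this file (not
 * shown in this excerpt), roughly like so:
 *
 *	static const struct net_device_ops tg3_netdev_ops = {
 *		.ndo_fix_features	= tg3_fix_features,
 *		.ndo_set_features	= tg3_set_features,
 *	};
 */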
6175 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6176 int new_mtu)
6178 dev->mtu = new_mtu;
6180 if (new_mtu > ETH_DATA_LEN) {
6181 if (tg3_flag(tp, 5780_CLASS)) {
6182 netdev_update_features(dev);
6183 tg3_flag_clear(tp, TSO_CAPABLE);
6184 } else {
6185 tg3_flag_set(tp, JUMBO_RING_ENABLE);
6187 } else {
6188 if (tg3_flag(tp, 5780_CLASS)) {
6189 tg3_flag_set(tp, TSO_CAPABLE);
6190 netdev_update_features(dev);
6192 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
6196 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6198 struct tg3 *tp = netdev_priv(dev);
6199 int err;
6201 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6202 return -EINVAL;
6204 if (!netif_running(dev)) {
6205 /* We'll just catch it later when the
6206 * device is brought up.
6207 */
6208 tg3_set_mtu(dev, tp, new_mtu);
6209 return 0;
6212 tg3_phy_stop(tp);
6214 tg3_netif_stop(tp);
6216 tg3_full_lock(tp, 1);
6218 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6220 tg3_set_mtu(dev, tp, new_mtu);
6222 err = tg3_restart_hw(tp, 0);
6224 if (!err)
6225 tg3_netif_start(tp);
6227 tg3_full_unlock(tp);
6229 if (!err)
6230 tg3_phy_start(tp);
6232 return err;
6235 static void tg3_rx_prodring_free(struct tg3 *tp,
6236 struct tg3_rx_prodring_set *tpr)
6238 int i;
6240 if (tpr != &tp->napi[0].prodring) {
6241 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6242 i = (i + 1) & tp->rx_std_ring_mask)
6243 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6244 tp->rx_pkt_map_sz);
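/* The rings are power-of-two sized, so the "(i + 1) & mask" step above
 * is a cheap wrap-around increment: e.g. with a 512-entry ring
 * (mask = 511), index 511 advances back to 0.
 */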
6246 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6247 for (i = tpr->rx_jmb_cons_idx;
6248 i != tpr->rx_jmb_prod_idx;
6249 i = (i + 1) & tp->rx_jmb_ring_mask) {
6250 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6251 TG3_RX_JMB_MAP_SZ);
6255 return;
6258 for (i = 0; i <= tp->rx_std_ring_mask; i++)
6259 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6260 tp->rx_pkt_map_sz);
6262 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6263 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6264 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6265 TG3_RX_JMB_MAP_SZ);
6269 /* Initialize rx rings for packet processing.
6270 *
6271 * The chip has been shut down and the driver detached from
6272 * the network stack, so no interrupts or new tx packets will
6273 * end up in the driver. tp->{tx,}lock are held and thus
6274 * we may not sleep.
6275 */
6276 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6277 struct tg3_rx_prodring_set *tpr)
6279 u32 i, rx_pkt_dma_sz;
6281 tpr->rx_std_cons_idx = 0;
6282 tpr->rx_std_prod_idx = 0;
6283 tpr->rx_jmb_cons_idx = 0;
6284 tpr->rx_jmb_prod_idx = 0;
6286 if (tpr != &tp->napi[0].prodring) {
6287 memset(&tpr->rx_std_buffers[0], 0,
6288 TG3_RX_STD_BUFF_RING_SIZE(tp));
6289 if (tpr->rx_jmb_buffers)
6290 memset(&tpr->rx_jmb_buffers[0], 0,
6291 TG3_RX_JMB_BUFF_RING_SIZE(tp));
6292 goto done;
6295 /* Zero out all descriptors. */
6296 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6298 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6299 if (tg3_flag(tp, 5780_CLASS) &&
6300 tp->dev->mtu > ETH_DATA_LEN)
6301 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6302 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6304 /* Initialize invariants of the rings, we only set this
6305 * stuff once. This works because the card does not
6306 * write into the rx buffer posting rings.
6308 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6309 struct tg3_rx_buffer_desc *rxd;
6311 rxd = &tpr->rx_std[i];
6312 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6313 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6314 rxd->opaque = (RXD_OPAQUE_RING_STD |
6315 (i << RXD_OPAQUE_INDEX_SHIFT));
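/* Each standard descriptor above is encoded as: the buffer length in
 * idx_len (shifted by RXD_LEN_SHIFT), an END flag in type_flags, and
 * an opaque cookie combining the ring id with the slot index so the
 * completion path can locate the buffer again.
 */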
6318 /* Now allocate fresh SKBs for each rx ring. */
6319 for (i = 0; i < tp->rx_pending; i++) {
6320 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6321 netdev_warn(tp->dev,
6322 "Using a smaller RX standard ring. Only "
6323 "%d out of %d buffers were allocated "
6324 "successfully\n", i, tp->rx_pending);
6325 if (i == 0)
6326 goto initfail;
6327 tp->rx_pending = i;
6328 break;
6332 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6333 goto done;
6335 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6337 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6338 goto done;
6340 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6341 struct tg3_rx_buffer_desc *rxd;
6343 rxd = &tpr->rx_jmb[i].std;
6344 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6345 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6346 RXD_FLAG_JUMBO;
6347 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6348 (i << RXD_OPAQUE_INDEX_SHIFT));
6351 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6352 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6353 netdev_warn(tp->dev,
6354 "Using a smaller RX jumbo ring. Only %d "
6355 "out of %d buffers were allocated "
6356 "successfully\n", i, tp->rx_jumbo_pending);
6357 if (i == 0)
6358 goto initfail;
6359 tp->rx_jumbo_pending = i;
6360 break;
6364 done:
6365 return 0;
6367 initfail:
6368 tg3_rx_prodring_free(tp, tpr);
6369 return -ENOMEM;
6372 static void tg3_rx_prodring_fini(struct tg3 *tp,
6373 struct tg3_rx_prodring_set *tpr)
6375 kfree(tpr->rx_std_buffers);
6376 tpr->rx_std_buffers = NULL;
6377 kfree(tpr->rx_jmb_buffers);
6378 tpr->rx_jmb_buffers = NULL;
6379 if (tpr->rx_std) {
6380 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6381 tpr->rx_std, tpr->rx_std_mapping);
6382 tpr->rx_std = NULL;
6384 if (tpr->rx_jmb) {
6385 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6386 tpr->rx_jmb, tpr->rx_jmb_mapping);
6387 tpr->rx_jmb = NULL;
6391 static int tg3_rx_prodring_init(struct tg3 *tp,
6392 struct tg3_rx_prodring_set *tpr)
6394 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6395 GFP_KERNEL);
6396 if (!tpr->rx_std_buffers)
6397 return -ENOMEM;
6399 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6400 TG3_RX_STD_RING_BYTES(tp),
6401 &tpr->rx_std_mapping,
6402 GFP_KERNEL);
6403 if (!tpr->rx_std)
6404 goto err_out;
6406 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6407 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6408 GFP_KERNEL);
6409 if (!tpr->rx_jmb_buffers)
6410 goto err_out;
6412 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6413 TG3_RX_JMB_RING_BYTES(tp),
6414 &tpr->rx_jmb_mapping,
6415 GFP_KERNEL);
6416 if (!tpr->rx_jmb)
6417 goto err_out;
6420 return 0;
6422 err_out:
6423 tg3_rx_prodring_fini(tp, tpr);
6424 return -ENOMEM;
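/* Illustrative pairing of the two helpers above (assuming the caller
 * may sleep, since the GFP_KERNEL allocations can block):
 *
 *	if (tg3_rx_prodring_init(tp, &tp->napi[0].prodring))
 *		return -ENOMEM;
 *	...
 *	tg3_rx_prodring_fini(tp, &tp->napi[0].prodring);
 *
 * Any partial failure inside tg3_rx_prodring_init() is unwound by its
 * internal call to tg3_rx_prodring_fini() before -ENOMEM is returned.
 */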
6427 /* Free up pending packets in all rx/tx rings.
6428 *
6429 * The chip has been shut down and the driver detached from
6430 * the network stack, so no interrupts or new tx packets will
6431 * end up in the driver. tp->{tx,}lock is not held and we are not
6432 * in an interrupt context and thus may sleep.
6433 */
6434 static void tg3_free_rings(struct tg3 *tp)
6436 int i, j;
6438 for (j = 0; j < tp->irq_cnt; j++) {
6439 struct tg3_napi *tnapi = &tp->napi[j];
6441 tg3_rx_prodring_free(tp, &tnapi->prodring);
6443 if (!tnapi->tx_buffers)
6444 continue;
6446 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6447 struct ring_info *txp;
6448 struct sk_buff *skb;
6449 unsigned int k;
6451 txp = &tnapi->tx_buffers[i];
6452 skb = txp->skb;
6454 if (skb == NULL) {
6455 i++;
6456 continue;
6459 pci_unmap_single(tp->pdev,
6460 dma_unmap_addr(txp, mapping),
6461 skb_headlen(skb),
6462 PCI_DMA_TODEVICE);
6463 txp->skb = NULL;
6465 i++;
6467 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6468 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6469 pci_unmap_page(tp->pdev,
6470 dma_unmap_addr(txp, mapping),
6471 skb_shinfo(skb)->frags[k].size,
6472 PCI_DMA_TODEVICE);
6473 i++;
6476 dev_kfree_skb_any(skb);
6481 /* Initialize tx/rx rings for packet processing.
6482 *
6483 * The chip has been shut down and the driver detached from
6484 * the network stack, so no interrupts or new tx packets will
6485 * end up in the driver. tp->{tx,}lock are held and thus
6486 * we may not sleep.
6487 */
6488 static int tg3_init_rings(struct tg3 *tp)
6490 int i;
6492 /* Free up all the SKBs. */
6493 tg3_free_rings(tp);
6495 for (i = 0; i < tp->irq_cnt; i++) {
6496 struct tg3_napi *tnapi = &tp->napi[i];
6498 tnapi->last_tag = 0;
6499 tnapi->last_irq_tag = 0;
6500 tnapi->hw_status->status = 0;
6501 tnapi->hw_status->status_tag = 0;
6502 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6504 tnapi->tx_prod = 0;
6505 tnapi->tx_cons = 0;
6506 if (tnapi->tx_ring)
6507 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6509 tnapi->rx_rcb_ptr = 0;
6510 if (tnapi->rx_rcb)
6511 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6513 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6514 tg3_free_rings(tp);
6515 return -ENOMEM;
6519 return 0;
6522 /*
6523 * Must not be invoked with interrupt sources disabled and
6524 * the hardware shut down.
6525 */
6526 static void tg3_free_consistent(struct tg3 *tp)
6528 int i;
6530 for (i = 0; i < tp->irq_cnt; i++) {
6531 struct tg3_napi *tnapi = &tp->napi[i];
6533 if (tnapi->tx_ring) {
6534 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6535 tnapi->tx_ring, tnapi->tx_desc_mapping);
6536 tnapi->tx_ring = NULL;
6539 kfree(tnapi->tx_buffers);
6540 tnapi->tx_buffers = NULL;
6542 if (tnapi->rx_rcb) {
6543 dma_free_coherent(&tp->pdev->dev,
6544 TG3_RX_RCB_RING_BYTES(tp),
6545 tnapi->rx_rcb,
6546 tnapi->rx_rcb_mapping);
6547 tnapi->rx_rcb = NULL;
6550 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6552 if (tnapi->hw_status) {
6553 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6554 tnapi->hw_status,
6555 tnapi->status_mapping);
6556 tnapi->hw_status = NULL;
6560 if (tp->hw_stats) {
6561 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6562 tp->hw_stats, tp->stats_mapping);
6563 tp->hw_stats = NULL;
6567 /*
6568 * Must not be invoked with interrupt sources disabled and
6569 * the hardware shut down. Can sleep.
6570 */
6571 static int tg3_alloc_consistent(struct tg3 *tp)
6573 int i;
6575 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6576 sizeof(struct tg3_hw_stats),
6577 &tp->stats_mapping,
6578 GFP_KERNEL);
6579 if (!tp->hw_stats)
6580 goto err_out;
6582 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6584 for (i = 0; i < tp->irq_cnt; i++) {
6585 struct tg3_napi *tnapi = &tp->napi[i];
6586 struct tg3_hw_status *sblk;
6588 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6589 TG3_HW_STATUS_SIZE,
6590 &tnapi->status_mapping,
6591 GFP_KERNEL);
6592 if (!tnapi->hw_status)
6593 goto err_out;
6595 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6596 sblk = tnapi->hw_status;
6598 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6599 goto err_out;
6601 /* If multivector TSS is enabled, vector 0 does not handle
6602 * tx interrupts. Don't allocate any resources for it.
6604 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6605 (i && tg3_flag(tp, ENABLE_TSS))) {
6606 tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6607 TG3_TX_RING_SIZE,
6608 GFP_KERNEL);
6609 if (!tnapi->tx_buffers)
6610 goto err_out;
6612 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6613 TG3_TX_RING_BYTES,
6614 &tnapi->tx_desc_mapping,
6615 GFP_KERNEL);
6616 if (!tnapi->tx_ring)
6617 goto err_out;
6620 /*
6621 * When RSS is enabled, the status block format changes
6622 * slightly. The "rx_jumbo_consumer", "reserved",
6623 * and "rx_mini_consumer" members get mapped to the
6624 * other three rx return ring producer indexes.
6625 */
6626 switch (i) {
6627 default:
6628 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6629 break;
6630 case 2:
6631 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6632 break;
6633 case 3:
6634 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6635 break;
6636 case 4:
6637 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6638 break;
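/* Net effect of the switch above: vectors 2, 3 and 4 pull their rx
 * return ring producer index from the rx_jumbo_consumer, reserved and
 * rx_mini_consumer words respectively; every other vector uses the
 * normal idx[0].rx_producer word.
 */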
6641 /*
6642 * If multivector RSS is enabled, vector 0 does not handle
6643 * rx or tx interrupts. Don't allocate any resources for it.
6644 */
6645 if (!i && tg3_flag(tp, ENABLE_RSS))
6646 continue;
6648 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6649 TG3_RX_RCB_RING_BYTES(tp),
6650 &tnapi->rx_rcb_mapping,
6651 GFP_KERNEL);
6652 if (!tnapi->rx_rcb)
6653 goto err_out;
6655 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6658 return 0;
6660 err_out:
6661 tg3_free_consistent(tp);
6662 return -ENOMEM;
6665 #define MAX_WAIT_CNT 1000
6667 /* To stop a block, clear the enable bit and poll till it
6668 * clears. tp->lock is held.
6670 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6672 unsigned int i;
6673 u32 val;
6675 if (tg3_flag(tp, 5705_PLUS)) {
6676 switch (ofs) {
6677 case RCVLSC_MODE:
6678 case DMAC_MODE:
6679 case MBFREE_MODE:
6680 case BUFMGR_MODE:
6681 case MEMARB_MODE:
6682 /* We can't enable/disable these bits on the
6683 * 5705/5750, so just claim success.
6684 */
6685 return 0;
6687 default:
6688 break;
6692 val = tr32(ofs);
6693 val &= ~enable_bit;
6694 tw32_f(ofs, val);
6696 for (i = 0; i < MAX_WAIT_CNT; i++) {
6697 udelay(100);
6698 val = tr32(ofs);
6699 if ((val & enable_bit) == 0)
6700 break;
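/* Worst case, the loop above polls MAX_WAIT_CNT (1000) times with a
 * 100 usec delay each, i.e. a budget of roughly 100 ms per block.
 */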
6703 if (i == MAX_WAIT_CNT && !silent) {
6704 dev_err(&tp->pdev->dev,
6705 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6706 ofs, enable_bit);
6707 return -ENODEV;
6710 return 0;
6713 /* tp->lock is held. */
6714 static int tg3_abort_hw(struct tg3 *tp, int silent)
6716 int i, err;
6718 tg3_disable_ints(tp);
6720 tp->rx_mode &= ~RX_MODE_ENABLE;
6721 tw32_f(MAC_RX_MODE, tp->rx_mode);
6722 udelay(10);
6724 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6725 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6726 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6727 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6728 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6729 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6731 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6732 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6733 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6734 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6735 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6736 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6737 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6739 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6740 tw32_f(MAC_MODE, tp->mac_mode);
6741 udelay(40);
6743 tp->tx_mode &= ~TX_MODE_ENABLE;
6744 tw32_f(MAC_TX_MODE, tp->tx_mode);
6746 for (i = 0; i < MAX_WAIT_CNT; i++) {
6747 udelay(100);
6748 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6749 break;
6751 if (i >= MAX_WAIT_CNT) {
6752 dev_err(&tp->pdev->dev,
6753 "%s timed out, TX_MODE_ENABLE will not clear "
6754 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6755 err |= -ENODEV;
6758 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6759 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6760 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6762 tw32(FTQ_RESET, 0xffffffff);
6763 tw32(FTQ_RESET, 0x00000000);
6765 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6766 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6768 for (i = 0; i < tp->irq_cnt; i++) {
6769 struct tg3_napi *tnapi = &tp->napi[i];
6770 if (tnapi->hw_status)
6771 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6773 if (tp->hw_stats)
6774 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6776 return err;
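/* The error handling above deliberately OR-accumulates the per-block
 * results so that every block is attempted even after a failure, while
 * any single timeout still surfaces as a nonzero return value.
 */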
6779 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6781 int i;
6782 u32 apedata;
6784 /* NCSI does not support APE events */
6785 if (tg3_flag(tp, APE_HAS_NCSI))
6786 return;
6788 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6789 if (apedata != APE_SEG_SIG_MAGIC)
6790 return;
6792 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6793 if (!(apedata & APE_FW_STATUS_READY))
6794 return;
6796 /* Wait for up to 1 millisecond for APE to service previous event. */
6797 for (i = 0; i < 10; i++) {
6798 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6799 return;
6801 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6803 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6804 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6805 event | APE_EVENT_STATUS_EVENT_PENDING);
6807 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6809 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6810 break;
6812 udelay(100);
6815 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6816 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6819 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6821 u32 event;
6822 u32 apedata;
6824 if (!tg3_flag(tp, ENABLE_APE))
6825 return;
6827 switch (kind) {
6828 case RESET_KIND_INIT:
6829 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6830 APE_HOST_SEG_SIG_MAGIC);
6831 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6832 APE_HOST_SEG_LEN_MAGIC);
6833 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6834 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6835 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6836 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6837 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6838 APE_HOST_BEHAV_NO_PHYLOCK);
6839 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6840 TG3_APE_HOST_DRVR_STATE_START);
6842 event = APE_EVENT_STATUS_STATE_START;
6843 break;
6844 case RESET_KIND_SHUTDOWN:
6845 /* With the interface we are currently using,
6846 * APE does not track driver state. Wiping
6847 * out the HOST SEGMENT SIGNATURE forces
6848 * the APE to assume OS absent status.
6850 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6852 if (device_may_wakeup(&tp->pdev->dev) &&
6853 tg3_flag(tp, WOL_ENABLE)) {
6854 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
6855 TG3_APE_HOST_WOL_SPEED_AUTO);
6856 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
6857 } else
6858 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
6860 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
6862 event = APE_EVENT_STATUS_STATE_UNLOAD;
6863 break;
6864 case RESET_KIND_SUSPEND:
6865 event = APE_EVENT_STATUS_STATE_SUSPEND;
6866 break;
6867 default:
6868 return;
6871 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6873 tg3_ape_send_event(tp, event);
6876 /* tp->lock is held. */
6877 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6879 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6880 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6882 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6883 switch (kind) {
6884 case RESET_KIND_INIT:
6885 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6886 DRV_STATE_START);
6887 break;
6889 case RESET_KIND_SHUTDOWN:
6890 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6891 DRV_STATE_UNLOAD);
6892 break;
6894 case RESET_KIND_SUSPEND:
6895 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6896 DRV_STATE_SUSPEND);
6897 break;
6899 default:
6900 break;
6904 if (kind == RESET_KIND_INIT ||
6905 kind == RESET_KIND_SUSPEND)
6906 tg3_ape_driver_state_change(tp, kind);
6909 /* tp->lock is held. */
6910 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6912 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6913 switch (kind) {
6914 case RESET_KIND_INIT:
6915 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6916 DRV_STATE_START_DONE);
6917 break;
6919 case RESET_KIND_SHUTDOWN:
6920 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6921 DRV_STATE_UNLOAD_DONE);
6922 break;
6924 default:
6925 break;
6929 if (kind == RESET_KIND_SHUTDOWN)
6930 tg3_ape_driver_state_change(tp, kind);
6933 /* tp->lock is held. */
6934 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6936 if (tg3_flag(tp, ENABLE_ASF)) {
6937 switch (kind) {
6938 case RESET_KIND_INIT:
6939 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6940 DRV_STATE_START);
6941 break;
6943 case RESET_KIND_SHUTDOWN:
6944 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6945 DRV_STATE_UNLOAD);
6946 break;
6948 case RESET_KIND_SUSPEND:
6949 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6950 DRV_STATE_SUSPEND);
6951 break;
6953 default:
6954 break;
6959 static int tg3_poll_fw(struct tg3 *tp)
6961 int i;
6962 u32 val;
6964 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6965 /* Wait up to 20ms for init done. */
6966 for (i = 0; i < 200; i++) {
6967 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6968 return 0;
6969 udelay(100);
6971 return -ENODEV;
6974 /* Wait for firmware initialization to complete. */
6975 for (i = 0; i < 100000; i++) {
6976 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6977 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6978 break;
6979 udelay(10);
6982 /* Chip might not be fitted with firmware. Some Sun onboard
6983 * parts are configured like that. So don't signal the timeout
6984 * of the above loop as an error, but do report the lack of
6985 * running firmware once.
6987 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
6988 tg3_flag_set(tp, NO_FWARE_REPORTED);
6990 netdev_info(tp->dev, "No firmware running\n");
6993 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
6994 /* The 57765 A0 needs a little more
6995 * time to do some important work.
6997 mdelay(10);
7000 return 0;
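/* Timing of the waits above: the 5906 VCPU path polls 200 times at
 * 100 usec (the 20 ms mentioned in the comment), while the firmware
 * mailbox path polls up to 100000 times at 10 usec, about one second.
 */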
7003 /* Save PCI command register before chip reset */
7004 static void tg3_save_pci_state(struct tg3 *tp)
7006 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7009 /* Restore PCI state after chip reset */
7010 static void tg3_restore_pci_state(struct tg3 *tp)
7012 u32 val;
7014 /* Re-enable indirect register accesses. */
7015 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7016 tp->misc_host_ctrl);
7018 /* Set MAX PCI retry to zero. */
7019 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7020 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7021 tg3_flag(tp, PCIX_MODE))
7022 val |= PCISTATE_RETRY_SAME_DMA;
7023 /* Allow reads and writes to the APE register and memory space. */
7024 if (tg3_flag(tp, ENABLE_APE))
7025 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7026 PCISTATE_ALLOW_APE_SHMEM_WR |
7027 PCISTATE_ALLOW_APE_PSPACE_WR;
7028 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7030 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7032 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7033 if (tg3_flag(tp, PCI_EXPRESS))
7034 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7035 else {
7036 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7037 tp->pci_cacheline_sz);
7038 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7039 tp->pci_lat_timer);
7043 /* Make sure PCI-X relaxed ordering bit is clear. */
7044 if (tg3_flag(tp, PCIX_MODE)) {
7045 u16 pcix_cmd;
7047 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7048 &pcix_cmd);
7049 pcix_cmd &= ~PCI_X_CMD_ERO;
7050 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7051 pcix_cmd);
7054 if (tg3_flag(tp, 5780_CLASS)) {
7056 /* Chip reset on 5780 will reset MSI enable bit,
7057 * so we need to restore it.
7058 */
7059 if (tg3_flag(tp, USING_MSI)) {
7060 u16 ctrl;
7062 pci_read_config_word(tp->pdev,
7063 tp->msi_cap + PCI_MSI_FLAGS,
7064 &ctrl);
7065 pci_write_config_word(tp->pdev,
7066 tp->msi_cap + PCI_MSI_FLAGS,
7067 ctrl | PCI_MSI_FLAGS_ENABLE);
7068 val = tr32(MSGINT_MODE);
7069 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7074 static void tg3_stop_fw(struct tg3 *);
7076 /* tp->lock is held. */
7077 static int tg3_chip_reset(struct tg3 *tp)
7079 u32 val;
7080 void (*write_op)(struct tg3 *, u32, u32);
7081 int i, err;
7083 tg3_nvram_lock(tp);
7085 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7087 /* No matching tg3_nvram_unlock() after this because
7088 * chip reset below will undo the nvram lock.
7090 tp->nvram_lock_cnt = 0;
7092 /* GRC_MISC_CFG core clock reset will clear the memory
7093 * enable bit in PCI register 4 and the MSI enable bit
7094 * on some chips, so we save relevant registers here.
7096 tg3_save_pci_state(tp);
7098 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7099 tg3_flag(tp, 5755_PLUS))
7100 tw32(GRC_FASTBOOT_PC, 0);
7102 /*
7103 * We must avoid the readl() that normally takes place.
7104 * It locks machines, causes machine checks, and other
7105 * fun things. So, temporarily disable the 5701
7106 * hardware workaround, while we do the reset.
7107 */
7108 write_op = tp->write32;
7109 if (write_op == tg3_write_flush_reg32)
7110 tp->write32 = tg3_write32;
7112 /* Prevent the irq handler from reading or writing PCI registers
7113 * during chip reset when the memory enable bit in the PCI command
7114 * register may be cleared. The chip does not generate interrupt
7115 * at this time, but the irq handler may still be called due to irq
7116 * sharing or irqpoll.
7118 tg3_flag_set(tp, CHIP_RESETTING);
7119 for (i = 0; i < tp->irq_cnt; i++) {
7120 struct tg3_napi *tnapi = &tp->napi[i];
7121 if (tnapi->hw_status) {
7122 tnapi->hw_status->status = 0;
7123 tnapi->hw_status->status_tag = 0;
7125 tnapi->last_tag = 0;
7126 tnapi->last_irq_tag = 0;
7128 smp_mb();
7130 for (i = 0; i < tp->irq_cnt; i++)
7131 synchronize_irq(tp->napi[i].irq_vec);
7133 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7134 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7135 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7138 /* do the reset */
7139 val = GRC_MISC_CFG_CORECLK_RESET;
7141 if (tg3_flag(tp, PCI_EXPRESS)) {
7142 /* Force PCIe 1.0a mode */
7143 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7144 !tg3_flag(tp, 57765_PLUS) &&
7145 tr32(TG3_PCIE_PHY_TSTCTL) ==
7146 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7147 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7149 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7150 tw32(GRC_MISC_CFG, (1 << 29));
7151 val |= (1 << 29);
7155 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7156 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7157 tw32(GRC_VCPU_EXT_CTRL,
7158 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7161 /* Manage gphy power for all CPMU absent PCIe devices. */
7162 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7163 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7165 tw32(GRC_MISC_CFG, val);
7167 /* restore 5701 hardware bug workaround write method */
7168 tp->write32 = write_op;
7170 /* Unfortunately, we have to delay before the PCI read back.
7171 * Some 575X chips will not even respond to a PCI cfg access
7172 * when the reset command is given to the chip.
7173 *
7174 * How do these hardware designers expect things to work
7175 * properly if the PCI write is posted for a long period
7176 * of time? There must always be some way to read a register
7177 * back in order to push out the posted write that performs
7178 * the reset.
7179 *
7180 * For most tg3 variants the trick below works.
7181 * Ho hum...
7182 */
7183 udelay(120);
7185 /* Flush PCI posted writes. The normal MMIO registers
7186 * are inaccessible at this time so this is the only
7187 * way to do this reliably (actually, this is no longer
7188 * the case, see above). I tried to use indirect
7189 * register read/write but this upset some 5701 variants.
7190 */
7191 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7193 udelay(120);
7195 if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) {
7196 u16 val16;
7198 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7199 int i;
7200 u32 cfg_val;
7202 /* Wait for link training to complete. */
7203 for (i = 0; i < 5000; i++)
7204 udelay(100);
7206 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7207 pci_write_config_dword(tp->pdev, 0xc4,
7208 cfg_val | (1 << 15));
7211 /* Clear the "no snoop" and "relaxed ordering" bits. */
7212 pci_read_config_word(tp->pdev,
7213 tp->pcie_cap + PCI_EXP_DEVCTL,
7214 &val16);
7215 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7216 PCI_EXP_DEVCTL_NOSNOOP_EN);
7217 /*
7218 * Older PCIe devices only support the 128 byte
7219 * MPS setting. Enforce the restriction.
7220 */
7221 if (!tg3_flag(tp, CPMU_PRESENT))
7222 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7223 pci_write_config_word(tp->pdev,
7224 tp->pcie_cap + PCI_EXP_DEVCTL,
7225 val16);
7227 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7229 /* Clear error status */
7230 pci_write_config_word(tp->pdev,
7231 tp->pcie_cap + PCI_EXP_DEVSTA,
7232 PCI_EXP_DEVSTA_CED |
7233 PCI_EXP_DEVSTA_NFED |
7234 PCI_EXP_DEVSTA_FED |
7235 PCI_EXP_DEVSTA_URD);
7238 tg3_restore_pci_state(tp);
7240 tg3_flag_clear(tp, CHIP_RESETTING);
7241 tg3_flag_clear(tp, ERROR_PROCESSED);
7243 val = 0;
7244 if (tg3_flag(tp, 5780_CLASS))
7245 val = tr32(MEMARB_MODE);
7246 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7248 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7249 tg3_stop_fw(tp);
7250 tw32(0x5000, 0x400);
7253 tw32(GRC_MODE, tp->grc_mode);
7255 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7256 val = tr32(0xc4);
7258 tw32(0xc4, val | (1 << 15));
7261 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7262 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7263 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7264 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7265 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7266 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7269 if (tg3_flag(tp, ENABLE_APE))
7270 tp->mac_mode = MAC_MODE_APE_TX_EN |
7271 MAC_MODE_APE_RX_EN |
7272 MAC_MODE_TDE_ENABLE;
7274 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7275 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
7276 val = tp->mac_mode;
7277 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7278 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7279 val = tp->mac_mode;
7280 } else
7281 val = 0;
7283 tw32_f(MAC_MODE, val);
7284 udelay(40);
7286 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7288 err = tg3_poll_fw(tp);
7289 if (err)
7290 return err;
7292 tg3_mdio_start(tp);
7294 if (tg3_flag(tp, PCI_EXPRESS) &&
7295 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7296 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7297 !tg3_flag(tp, 57765_PLUS)) {
7298 val = tr32(0x7c00);
7300 tw32(0x7c00, val | (1 << 25));
7303 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7304 val = tr32(TG3_CPMU_CLCK_ORIDE);
7305 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7308 /* Reprobe ASF enable state. */
7309 tg3_flag_clear(tp, ENABLE_ASF);
7310 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7311 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7312 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7313 u32 nic_cfg;
7315 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7316 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7317 tg3_flag_set(tp, ENABLE_ASF);
7318 tp->last_event_jiffies = jiffies;
7319 if (tg3_flag(tp, 5750_PLUS))
7320 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7324 return 0;
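/* A minimal sketch (not tg3 API; the name is illustrative) of the
 * posted-write flushing pattern discussed in tg3_chip_reset() above:
 * a dummy read back from the same device forces any posted MMIO write
 * ahead of it to complete.
 */
static inline void example_flush_posted_write(void __iomem *reg, u32 val)
{
	writel(val, reg);
	(void)readl(reg);	/* read back pushes the posted write out */
}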
7327 /* tp->lock is held. */
7328 static void tg3_stop_fw(struct tg3 *tp)
7330 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7331 /* Wait for RX cpu to ACK the previous event. */
7332 tg3_wait_for_event_ack(tp);
7334 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7336 tg3_generate_fw_event(tp);
7338 /* Wait for RX cpu to ACK this event. */
7339 tg3_wait_for_event_ack(tp);
7343 /* tp->lock is held. */
7344 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7346 int err;
7348 tg3_stop_fw(tp);
7350 tg3_write_sig_pre_reset(tp, kind);
7352 tg3_abort_hw(tp, silent);
7353 err = tg3_chip_reset(tp);
7355 __tg3_set_mac_addr(tp, 0);
7357 tg3_write_sig_legacy(tp, kind);
7358 tg3_write_sig_post_reset(tp, kind);
7360 if (err)
7361 return err;
7363 return 0;
7366 #define RX_CPU_SCRATCH_BASE 0x30000
7367 #define RX_CPU_SCRATCH_SIZE 0x04000
7368 #define TX_CPU_SCRATCH_BASE 0x34000
7369 #define TX_CPU_SCRATCH_SIZE 0x04000
7371 /* tp->lock is held. */
7372 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7374 int i;
7376 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7378 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7379 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7381 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7382 return 0;
7384 if (offset == RX_CPU_BASE) {
7385 for (i = 0; i < 10000; i++) {
7386 tw32(offset + CPU_STATE, 0xffffffff);
7387 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7388 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7389 break;
7392 tw32(offset + CPU_STATE, 0xffffffff);
7393 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
7394 udelay(10);
7395 } else {
7396 for (i = 0; i < 10000; i++) {
7397 tw32(offset + CPU_STATE, 0xffffffff);
7398 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7399 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7400 break;
7404 if (i >= 10000) {
7405 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7406 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7407 return -ENODEV;
7410 /* Clear firmware's nvram arbitration. */
7411 if (tg3_flag(tp, NVRAM))
7412 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7413 return 0;
7416 struct fw_info {
7417 unsigned int fw_base;
7418 unsigned int fw_len;
7419 const __be32 *fw_data;
7422 /* tp->lock is held. */
7423 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7424 int cpu_scratch_size, struct fw_info *info)
7426 int err, lock_err, i;
7427 void (*write_op)(struct tg3 *, u32, u32);
7429 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7430 netdev_err(tp->dev,
7431 "%s: Trying to load TX cpu firmware which is 5705\n",
7432 __func__);
7433 return -EINVAL;
7436 if (tg3_flag(tp, 5705_PLUS))
7437 write_op = tg3_write_mem;
7438 else
7439 write_op = tg3_write_indirect_reg32;
7441 /* It is possible that bootcode is still loading at this point.
7442 * Get the nvram lock first before halting the cpu.
7444 lock_err = tg3_nvram_lock(tp);
7445 err = tg3_halt_cpu(tp, cpu_base);
7446 if (!lock_err)
7447 tg3_nvram_unlock(tp);
7448 if (err)
7449 goto out;
7451 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7452 write_op(tp, cpu_scratch_base + i, 0);
7453 tw32(cpu_base + CPU_STATE, 0xffffffff);
7454 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7455 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7456 write_op(tp, (cpu_scratch_base +
7457 (info->fw_base & 0xffff) +
7458 (i * sizeof(u32))),
7459 be32_to_cpu(info->fw_data[i]));
7461 err = 0;
7463 out:
7464 return err;
7467 /* tp->lock is held. */
7468 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7470 struct fw_info info;
7471 const __be32 *fw_data;
7472 int err, i;
7474 fw_data = (void *)tp->fw->data;
7476 /* Firmware blob starts with version numbers, followed by
7477 start address and length. We are setting complete length.
7478 length = end_address_of_bss - start_address_of_text.
7479 Remainder is the blob to be loaded contiguously
7480 from start address. */
7482 info.fw_base = be32_to_cpu(fw_data[1]);
7483 info.fw_len = tp->fw->size - 12;
7484 info.fw_data = &fw_data[3];
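/* Layout implied by the comment and code above, each field a
 * big-endian u32:
 *
 *	fw_data[0]	version
 *	fw_data[1]	start address (fw_base)
 *	fw_data[2]	length
 *	fw_data[3...]	image, loaded contiguously from fw_base
 *
 * hence the 12-byte header subtracted via "tp->fw->size - 12".
 */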
7486 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7487 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7488 &info);
7489 if (err)
7490 return err;
7492 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7493 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7494 &info);
7495 if (err)
7496 return err;
7498 /* Now startup only the RX cpu. */
7499 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7500 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7502 for (i = 0; i < 5; i++) {
7503 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7504 break;
7505 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7506 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
7507 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7508 udelay(1000);
7510 if (i >= 5) {
7511 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7512 "should be %08x\n", __func__,
7513 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7514 return -ENODEV;
7516 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7517 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
7519 return 0;
7522 /* tp->lock is held. */
7523 static int tg3_load_tso_firmware(struct tg3 *tp)
7525 struct fw_info info;
7526 const __be32 *fw_data;
7527 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7528 int err, i;
7530 if (tg3_flag(tp, HW_TSO_1) ||
7531 tg3_flag(tp, HW_TSO_2) ||
7532 tg3_flag(tp, HW_TSO_3))
7533 return 0;
7535 fw_data = (void *)tp->fw->data;
7537 /* Firmware blob starts with version numbers, followed by
7538 start address and length. We are setting complete length.
7539 length = end_address_of_bss - start_address_of_text.
7540 Remainder is the blob to be loaded contiguously
7541 from start address. */
7543 info.fw_base = be32_to_cpu(fw_data[1]);
7544 cpu_scratch_size = tp->fw_len;
7545 info.fw_len = tp->fw->size - 12;
7546 info.fw_data = &fw_data[3];
7548 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7549 cpu_base = RX_CPU_BASE;
7550 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7551 } else {
7552 cpu_base = TX_CPU_BASE;
7553 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7554 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7557 err = tg3_load_firmware_cpu(tp, cpu_base,
7558 cpu_scratch_base, cpu_scratch_size,
7559 &info);
7560 if (err)
7561 return err;
7563 /* Now startup the cpu. */
7564 tw32(cpu_base + CPU_STATE, 0xffffffff);
7565 tw32_f(cpu_base + CPU_PC, info.fw_base);
7567 for (i = 0; i < 5; i++) {
7568 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7569 break;
7570 tw32(cpu_base + CPU_STATE, 0xffffffff);
7571 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7572 tw32_f(cpu_base + CPU_PC, info.fw_base);
7573 udelay(1000);
7575 if (i >= 5) {
7576 netdev_err(tp->dev,
7577 "%s fails to set CPU PC, is %08x should be %08x\n",
7578 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7579 return -ENODEV;
7581 tw32(cpu_base + CPU_STATE, 0xffffffff);
7582 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7583 return 0;
7587 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7589 struct tg3 *tp = netdev_priv(dev);
7590 struct sockaddr *addr = p;
7591 int err = 0, skip_mac_1 = 0;
7593 if (!is_valid_ether_addr(addr->sa_data))
7594 return -EINVAL;
7596 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7598 if (!netif_running(dev))
7599 return 0;
7601 if (tg3_flag(tp, ENABLE_ASF)) {
7602 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7604 addr0_high = tr32(MAC_ADDR_0_HIGH);
7605 addr0_low = tr32(MAC_ADDR_0_LOW);
7606 addr1_high = tr32(MAC_ADDR_1_HIGH);
7607 addr1_low = tr32(MAC_ADDR_1_LOW);
7609 /* Skip MAC addr 1 if ASF is using it. */
7610 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7611 !(addr1_high == 0 && addr1_low == 0))
7612 skip_mac_1 = 1;
7614 spin_lock_bh(&tp->lock);
7615 __tg3_set_mac_addr(tp, skip_mac_1);
7616 spin_unlock_bh(&tp->lock);
7618 return err;
7621 /* tp->lock is held. */
7622 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7623 dma_addr_t mapping, u32 maxlen_flags,
7624 u32 nic_addr)
7626 tg3_write_mem(tp,
7627 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7628 ((u64) mapping >> 32));
7629 tg3_write_mem(tp,
7630 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7631 ((u64) mapping & 0xffffffff));
7632 tg3_write_mem(tp,
7633 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7634 maxlen_flags);
7636 if (!tg3_flag(tp, 5705_PLUS))
7637 tg3_write_mem(tp,
7638 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7639 nic_addr);
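/* The bdinfo DMA address is split into two 32-bit SRAM words above:
 * for example, mapping 0x123456789a stores 0x00000012 in the HIGH word
 * (">> 32") and 0x3456789a in the LOW word ("& 0xffffffff").
 */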
7642 static void __tg3_set_rx_mode(struct net_device *);
7643 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7645 int i;
7647 if (!tg3_flag(tp, ENABLE_TSS)) {
7648 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7649 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7650 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7651 } else {
7652 tw32(HOSTCC_TXCOL_TICKS, 0);
7653 tw32(HOSTCC_TXMAX_FRAMES, 0);
7654 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7657 if (!tg3_flag(tp, ENABLE_RSS)) {
7658 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7659 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7660 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7661 } else {
7662 tw32(HOSTCC_RXCOL_TICKS, 0);
7663 tw32(HOSTCC_RXMAX_FRAMES, 0);
7664 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7667 if (!tg3_flag(tp, 5705_PLUS)) {
7668 u32 val = ec->stats_block_coalesce_usecs;
7670 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7671 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7673 if (!netif_carrier_ok(tp->dev))
7674 val = 0;
7676 tw32(HOSTCC_STAT_COAL_TICKS, val);
7679 for (i = 0; i < tp->irq_cnt - 1; i++) {
7680 u32 reg;
7682 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7683 tw32(reg, ec->rx_coalesce_usecs);
7684 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7685 tw32(reg, ec->rx_max_coalesced_frames);
7686 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7687 tw32(reg, ec->rx_max_coalesced_frames_irq);
7689 if (tg3_flag(tp, ENABLE_TSS)) {
7690 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7691 tw32(reg, ec->tx_coalesce_usecs);
7692 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7693 tw32(reg, ec->tx_max_coalesced_frames);
7694 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7695 tw32(reg, ec->tx_max_coalesced_frames_irq);
7699 for (; i < tp->irq_max - 1; i++) {
7700 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7701 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7702 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7704 if (tg3_flag(tp, ENABLE_TSS)) {
7705 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7706 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7707 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
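/* Per-vector host coalescing registers are laid out at a fixed
 * 0x18-byte stride, hence the "BASE_VEC1 + i * 0x18" addressing used
 * in both loops above.
 */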
7712 /* tp->lock is held. */
7713 static void tg3_rings_reset(struct tg3 *tp)
7715 int i;
7716 u32 stblk, txrcb, rxrcb, limit;
7717 struct tg3_napi *tnapi = &tp->napi[0];
7719 /* Disable all transmit rings but the first. */
7720 if (!tg3_flag(tp, 5705_PLUS))
7721 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7722 else if (tg3_flag(tp, 5717_PLUS))
7723 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7724 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7725 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7726 else
7727 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7729 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7730 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7731 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7732 BDINFO_FLAGS_DISABLED);
7735 /* Disable all receive return rings but the first. */
7736 if (tg3_flag(tp, 5717_PLUS))
7737 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7738 else if (!tg3_flag(tp, 5705_PLUS))
7739 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7740 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7741 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7742 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7743 else
7744 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7746 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7747 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7748 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7749 BDINFO_FLAGS_DISABLED);
7751 /* Disable interrupts */
7752 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7754 /* Zero mailbox registers. */
7755 if (tg3_flag(tp, SUPPORT_MSIX)) {
7756 for (i = 1; i < tp->irq_max; i++) {
7757 tp->napi[i].tx_prod = 0;
7758 tp->napi[i].tx_cons = 0;
7759 if (tg3_flag(tp, ENABLE_TSS))
7760 tw32_mailbox(tp->napi[i].prodmbox, 0);
7761 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7762 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7764 if (!tg3_flag(tp, ENABLE_TSS))
7765 tw32_mailbox(tp->napi[0].prodmbox, 0);
7766 } else {
7767 tp->napi[0].tx_prod = 0;
7768 tp->napi[0].tx_cons = 0;
7769 tw32_mailbox(tp->napi[0].prodmbox, 0);
7770 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7773 /* Make sure the NIC-based send BD rings are disabled. */
7774 if (!tg3_flag(tp, 5705_PLUS)) {
7775 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7776 for (i = 0; i < 16; i++)
7777 tw32_tx_mbox(mbox + i * 8, 0);
7780 txrcb = NIC_SRAM_SEND_RCB;
7781 rxrcb = NIC_SRAM_RCV_RET_RCB;
7783 /* Clear status block in ram. */
7784 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7786 /* Set status block DMA address */
7787 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7788 ((u64) tnapi->status_mapping >> 32));
7789 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7790 ((u64) tnapi->status_mapping & 0xffffffff));
7792 if (tnapi->tx_ring) {
7793 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7794 (TG3_TX_RING_SIZE <<
7795 BDINFO_FLAGS_MAXLEN_SHIFT),
7796 NIC_SRAM_TX_BUFFER_DESC);
7797 txrcb += TG3_BDINFO_SIZE;
7800 if (tnapi->rx_rcb) {
7801 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7802 (tp->rx_ret_ring_mask + 1) <<
7803 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7804 rxrcb += TG3_BDINFO_SIZE;
7807 stblk = HOSTCC_STATBLCK_RING1;
7809 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7810 u64 mapping = (u64)tnapi->status_mapping;
7811 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7812 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7814 /* Clear status block in ram. */
7815 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7817 if (tnapi->tx_ring) {
7818 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7819 (TG3_TX_RING_SIZE <<
7820 BDINFO_FLAGS_MAXLEN_SHIFT),
7821 NIC_SRAM_TX_BUFFER_DESC);
7822 txrcb += TG3_BDINFO_SIZE;
7825 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7826 ((tp->rx_ret_ring_mask + 1) <<
7827 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7829 stblk += 8;
7830 rxrcb += TG3_BDINFO_SIZE;
7834 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
7836 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
7838 if (!tg3_flag(tp, 5750_PLUS) ||
7839 tg3_flag(tp, 5780_CLASS) ||
7840 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7841 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7842 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
7843 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7844 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7845 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
7846 else
7847 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
7849 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
7850 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
7852 val = min(nic_rep_thresh, host_rep_thresh);
7853 tw32(RCVBDI_STD_THRESH, val);
7855 if (tg3_flag(tp, 57765_PLUS))
7856 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
7858 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7859 return;
7861 if (!tg3_flag(tp, 5705_PLUS))
7862 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
7863 else
7864 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
7866 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
7868 val = min(bdcache_maxcnt / 2, host_rep_thresh);
7869 tw32(RCVBDI_JUMBO_THRESH, val);
7871 if (tg3_flag(tp, 57765_PLUS))
7872 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
7875 /* tp->lock is held. */
7876 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7878 u32 val, rdmac_mode;
7879 int i, err, limit;
7880 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
7882 tg3_disable_ints(tp);
7884 tg3_stop_fw(tp);
7886 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7888 if (tg3_flag(tp, INIT_COMPLETE))
7889 tg3_abort_hw(tp, 1);
7891 /* Enable MAC control of LPI */
7892 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
7893 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
7894 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
7895 TG3_CPMU_EEE_LNKIDL_UART_IDL);
7897 tw32_f(TG3_CPMU_EEE_CTRL,
7898 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
7900 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
7901 TG3_CPMU_EEEMD_LPI_IN_TX |
7902 TG3_CPMU_EEEMD_LPI_IN_RX |
7903 TG3_CPMU_EEEMD_EEE_ENABLE;
7905 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7906 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
7908 if (tg3_flag(tp, ENABLE_APE))
7909 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
7911 tw32_f(TG3_CPMU_EEE_MODE, val);
7913 tw32_f(TG3_CPMU_EEE_DBTMR1,
7914 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
7915 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
7917 tw32_f(TG3_CPMU_EEE_DBTMR2,
7918 TG3_CPMU_DBTMR2_APE_TX_2047US |
7919 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
7922 if (reset_phy)
7923 tg3_phy_reset(tp);
7925 err = tg3_chip_reset(tp);
7926 if (err)
7927 return err;
7929 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7931 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7932 val = tr32(TG3_CPMU_CTRL);
7933 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7934 tw32(TG3_CPMU_CTRL, val);
7936 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7937 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7938 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7939 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7941 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7942 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7943 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7944 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7946 val = tr32(TG3_CPMU_HST_ACC);
7947 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7948 val |= CPMU_HST_ACC_MACCLK_6_25;
7949 tw32(TG3_CPMU_HST_ACC, val);
7952 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7953 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7954 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7955 PCIE_PWR_MGMT_L1_THRESH_4MS;
7956 tw32(PCIE_PWR_MGMT_THRESH, val);
7958 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7959 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7961 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7963 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7964 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7967 if (tg3_flag(tp, L1PLLPD_EN)) {
7968 u32 grc_mode = tr32(GRC_MODE);
7970 /* Access the lower 1K of PL PCIE block registers. */
7971 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7972 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7974 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
7975 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
7976 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
7978 tw32(GRC_MODE, grc_mode);
7981 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7982 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7983 u32 grc_mode = tr32(GRC_MODE);
7985 /* Access the lower 1K of PL PCIE block registers. */
7986 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7987 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7989 val = tr32(TG3_PCIE_TLDLPL_PORT +
7990 TG3_PCIE_PL_LO_PHYCTL5);
7991 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
7992 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
7994 tw32(GRC_MODE, grc_mode);
7997 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
7998 u32 grc_mode = tr32(GRC_MODE);
8000 /* Access the lower 1K of DL PCIE block registers. */
8001 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8002 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8004 val = tr32(TG3_PCIE_TLDLPL_PORT +
8005 TG3_PCIE_DL_LO_FTSMAX);
8006 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8007 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8008 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8010 tw32(GRC_MODE, grc_mode);
8013 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8014 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8015 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8016 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8019 /* This works around an issue with Athlon chipsets on
8020 * B3 tigon3 silicon. This bit has no effect on any
8021 * other revision. But do not set this on PCI Express
8022 * chips and don't even touch the clocks if the CPMU is present.
8024 if (!tg3_flag(tp, CPMU_PRESENT)) {
8025 if (!tg3_flag(tp, PCI_EXPRESS))
8026 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8027 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8030 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8031 tg3_flag(tp, PCIX_MODE)) {
8032 val = tr32(TG3PCI_PCISTATE);
8033 val |= PCISTATE_RETRY_SAME_DMA;
8034 tw32(TG3PCI_PCISTATE, val);
8037 if (tg3_flag(tp, ENABLE_APE)) {
8038 /* Allow reads and writes to the
8039 * APE register and memory space.
8041 val = tr32(TG3PCI_PCISTATE);
8042 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8043 PCISTATE_ALLOW_APE_SHMEM_WR |
8044 PCISTATE_ALLOW_APE_PSPACE_WR;
8045 tw32(TG3PCI_PCISTATE, val);
8048 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8049 /* Enable some hw fixes. */
8050 val = tr32(TG3PCI_MSI_DATA);
8051 val |= (1 << 26) | (1 << 28) | (1 << 29);
8052 tw32(TG3PCI_MSI_DATA, val);
8055 /* Descriptor ring init may make accesses to the
8056 * NIC SRAM area to set up the TX descriptors, so we
8057 * can only do this after the hardware has been
8058 * successfully reset.
8060 err = tg3_init_rings(tp);
8061 if (err)
8062 return err;
8064 if (tg3_flag(tp, 57765_PLUS)) {
8065 val = tr32(TG3PCI_DMA_RW_CTRL) &
8066 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8067 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8068 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8069 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8070 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8071 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8072 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8073 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8074 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8075 /* This value is determined during the probe time DMA
8076 * engine test, tg3_test_dma.
8078 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8081 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8082 GRC_MODE_4X_NIC_SEND_RINGS |
8083 GRC_MODE_NO_TX_PHDR_CSUM |
8084 GRC_MODE_NO_RX_PHDR_CSUM);
8085 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8087 /* Pseudo-header checksum is done by hardware logic and not
8088 * the offload processors, so make the chip do the pseudo-
8089 * header checksums on receive. For transmit it is more
8090 * convenient to do the pseudo-header checksum in software
8091 * as Linux does that on transmit for us in all cases.
8093 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8095 tw32(GRC_MODE,
8096 tp->grc_mode |
8097 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8099 /* Set up the timer prescaler register. Clock is always 66 MHz. */
8100 val = tr32(GRC_MISC_CFG);
8101 val &= ~0xff;
8102 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8103 tw32(GRC_MISC_CFG, val);
8105 /* Initialize MBUF/DESC pool. */
8106 if (tg3_flag(tp, 5750_PLUS)) {
8107 /* Do nothing. */
8108 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8109 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8110 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8111 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8112 else
8113 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8114 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8115 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8116 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8117 int fw_len;
8119 fw_len = tp->fw_len;
8120 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8121 tw32(BUFMGR_MB_POOL_ADDR,
8122 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8123 tw32(BUFMGR_MB_POOL_SIZE,
8124 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8127 if (tp->dev->mtu <= ETH_DATA_LEN) {
8128 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8129 tp->bufmgr_config.mbuf_read_dma_low_water);
8130 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8131 tp->bufmgr_config.mbuf_mac_rx_low_water);
8132 tw32(BUFMGR_MB_HIGH_WATER,
8133 tp->bufmgr_config.mbuf_high_water);
8134 } else {
8135 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8136 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8137 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8138 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8139 tw32(BUFMGR_MB_HIGH_WATER,
8140 tp->bufmgr_config.mbuf_high_water_jumbo);
8142 tw32(BUFMGR_DMA_LOW_WATER,
8143 tp->bufmgr_config.dma_low_water);
8144 tw32(BUFMGR_DMA_HIGH_WATER,
8145 tp->bufmgr_config.dma_high_water);
8147 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8148 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8149 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8150 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8151 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8152 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8153 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8154 tw32(BUFMGR_MODE, val);
8155 for (i = 0; i < 2000; i++) {
8156 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8157 break;
8158 udelay(10);
8160 if (i >= 2000) {
8161 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8162 return -ENODEV;
8165 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8166 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8168 tg3_setup_rxbd_thresholds(tp);
8170 /* Initialize TG3_BDINFO's at:
8171 * RCVDBDI_STD_BD: standard eth size rx ring
8172 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8173 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8175 * like so:
8176 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8177 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8178 * ring attribute flags
8179 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8181 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8182 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8184 * The size of each ring is fixed in the firmware, but the location is
8185 * configurable.
8187 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8188 ((u64) tpr->rx_std_mapping >> 32));
8189 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8190 ((u64) tpr->rx_std_mapping & 0xffffffff));
8191 if (!tg3_flag(tp, 5717_PLUS))
8192 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8193 NIC_SRAM_RX_BUFFER_DESC);
8195 /* Disable the mini ring */
8196 if (!tg3_flag(tp, 5705_PLUS))
8197 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8198 BDINFO_FLAGS_DISABLED);
8200 /* Program the jumbo buffer descriptor ring control
8201 * blocks on those devices that have them.
8202 */
8203 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8204 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8206 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8207 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8208 ((u64) tpr->rx_jmb_mapping >> 32));
8209 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8210 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8211 val = TG3_RX_JMB_RING_SIZE(tp) <<
8212 BDINFO_FLAGS_MAXLEN_SHIFT;
8213 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8214 val | BDINFO_FLAGS_USE_EXT_RECV);
8215 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8216 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8217 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8218 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8219 } else {
8220 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8221 BDINFO_FLAGS_DISABLED);
8222 }
8224 if (tg3_flag(tp, 57765_PLUS)) {
8225 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8226 val = TG3_RX_STD_MAX_SIZE_5700;
8227 else
8228 val = TG3_RX_STD_MAX_SIZE_5717;
8229 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8230 val |= (TG3_RX_STD_DMA_SZ << 2);
8231 } else
8232 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8233 } else
8234 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8236 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8238 tpr->rx_std_prod_idx = tp->rx_pending;
8239 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8241 tpr->rx_jmb_prod_idx =
8242 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8243 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8245 tg3_rings_reset(tp);
8247 /* Initialize MAC address and backoff seed. */
8248 __tg3_set_mac_addr(tp, 0);
8250 /* MTU + ethernet header + FCS + optional VLAN tag */
8251 tw32(MAC_RX_MTU_SIZE,
8252 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8254 /* The slot time is changed by tg3_setup_phy if we
8255 * run at gigabit with half duplex.
8256 */
8257 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8258 (6 << TX_LENGTHS_IPG_SHIFT) |
8259 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8261 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8262 val |= tr32(MAC_TX_LENGTHS) &
8263 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8264 TX_LENGTHS_CNT_DWN_VAL_MSK);
8266 tw32(MAC_TX_LENGTHS, val);
8268 /* Receive rules. */
8269 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8270 tw32(RCVLPC_CONFIG, 0x0181);
8272 /* Calculate RDMAC_MODE setting early, we need it to determine
8273 * the RCVLPC_STATE_ENABLE mask.
8274 */
8275 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8276 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8277 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8278 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8279 RDMAC_MODE_LNGREAD_ENAB);
8281 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8282 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8284 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8285 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8286 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8287 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8288 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8289 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8291 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8292 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8293 if (tg3_flag(tp, TSO_CAPABLE) &&
8294 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8295 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8296 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8297 !tg3_flag(tp, IS_5788)) {
8298 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8299 }
8300 }
8302 if (tg3_flag(tp, PCI_EXPRESS))
8303 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8305 if (tg3_flag(tp, HW_TSO_1) ||
8306 tg3_flag(tp, HW_TSO_2) ||
8307 tg3_flag(tp, HW_TSO_3))
8308 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8310 if (tg3_flag(tp, 57765_PLUS) ||
8311 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8312 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8313 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8315 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8316 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8318 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8319 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8320 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8321 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8322 tg3_flag(tp, 57765_PLUS)) {
8323 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8324 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8325 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8326 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8327 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8328 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8329 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8330 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8331 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8332 }
8333 tw32(TG3_RDMA_RSRVCTRL_REG,
8334 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8335 }
8337 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8338 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8339 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8340 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8341 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8342 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8343 }
8345 /* Receive/send statistics. */
8346 if (tg3_flag(tp, 5750_PLUS)) {
8347 val = tr32(RCVLPC_STATS_ENABLE);
8348 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8349 tw32(RCVLPC_STATS_ENABLE, val);
8350 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8351 tg3_flag(tp, TSO_CAPABLE)) {
8352 val = tr32(RCVLPC_STATS_ENABLE);
8353 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8354 tw32(RCVLPC_STATS_ENABLE, val);
8355 } else {
8356 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8357 }
8358 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8359 tw32(SNDDATAI_STATSENAB, 0xffffff);
8360 tw32(SNDDATAI_STATSCTRL,
8361 (SNDDATAI_SCTRL_ENABLE |
8362 SNDDATAI_SCTRL_FASTUPD));
8364 /* Setup host coalescing engine. */
8365 tw32(HOSTCC_MODE, 0);
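/* The loop below polls for up to 2000 * 10us = 20ms for the
 * coalescing engine to report itself disabled before the new
 * coalescing parameters are programmed.
 */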
8366 for (i = 0; i < 2000; i++) {
8367 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8368 break;
8369 udelay(10);
8370 }
8372 __tg3_set_coalesce(tp, &tp->coal);
8374 if (!tg3_flag(tp, 5705_PLUS)) {
8375 /* Status/statistics block address. See tg3_timer,
8376 * the tg3_periodic_fetch_stats call there, and
8377 * tg3_get_stats to see how this works for 5705/5750 chips.
8378 */
8379 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8380 ((u64) tp->stats_mapping >> 32));
8381 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8382 ((u64) tp->stats_mapping & 0xffffffff));
8383 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8385 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8387 /* Clear statistics and status block memory areas */
8388 for (i = NIC_SRAM_STATS_BLK;
8389 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8390 i += sizeof(u32)) {
8391 tg3_write_mem(tp, i, 0);
8392 udelay(40);
8393 }
8394 }
8396 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8398 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8399 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8400 if (!tg3_flag(tp, 5705_PLUS))
8401 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8403 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8404 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8405 /* reset to prevent losing 1st rx packet intermittently */
8406 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8407 udelay(10);
8408 }
8410 if (tg3_flag(tp, ENABLE_APE))
8411 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8412 else
8413 tp->mac_mode = 0;
8414 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8415 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8416 if (!tg3_flag(tp, 5705_PLUS) &&
8417 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8418 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8419 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8420 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8421 udelay(40);
8423 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8424 * If TG3_FLAG_IS_NIC is zero, we should read the
8425 * register to preserve the GPIO settings for LOMs. The GPIOs,
8426 * whether used as inputs or outputs, are set by boot code after
8427 * reset.
8428 */
8429 if (!tg3_flag(tp, IS_NIC)) {
8430 u32 gpio_mask;
8432 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8433 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8434 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8436 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8437 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8438 GRC_LCLCTRL_GPIO_OUTPUT3;
8440 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8441 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8443 tp->grc_local_ctrl &= ~gpio_mask;
8444 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8445 }
8446 /* GPIO1 must be driven high for eeprom write protect */
8447 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8448 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8449 GRC_LCLCTRL_GPIO_OUTPUT1);
8451 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8452 udelay(100);
8454 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8455 val = tr32(MSGINT_MODE);
8456 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8457 tw32(MSGINT_MODE, val);
8458 }
8460 if (!tg3_flag(tp, 5705_PLUS)) {
8461 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8462 udelay(40);
8463 }
8465 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8466 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8467 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8468 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8469 WDMAC_MODE_LNGREAD_ENAB);
8471 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8472 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8473 if (tg3_flag(tp, TSO_CAPABLE) &&
8474 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8475 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8476 /* nothing */
8477 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8478 !tg3_flag(tp, IS_5788)) {
8479 val |= WDMAC_MODE_RX_ACCEL;
8480 }
8481 }
8483 /* Enable host coalescing bug fix */
8484 if (tg3_flag(tp, 5755_PLUS))
8485 val |= WDMAC_MODE_STATUS_TAG_FIX;
8487 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8488 val |= WDMAC_MODE_BURST_ALL_DATA;
8490 tw32_f(WDMAC_MODE, val);
8491 udelay(40);
8493 if (tg3_flag(tp, PCIX_MODE)) {
8494 u16 pcix_cmd;
8496 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8497 &pcix_cmd);
8498 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8499 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8500 pcix_cmd |= PCI_X_CMD_READ_2K;
8501 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8502 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8503 pcix_cmd |= PCI_X_CMD_READ_2K;
8504 }
8505 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8506 pcix_cmd);
8507 }
8509 tw32_f(RDMAC_MODE, rdmac_mode);
8510 udelay(40);
8512 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8513 if (!tg3_flag(tp, 5705_PLUS))
8514 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8516 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8517 tw32(SNDDATAC_MODE,
8518 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8519 else
8520 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8522 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8523 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8524 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8525 if (tg3_flag(tp, LRG_PROD_RING_CAP))
8526 val |= RCVDBDI_MODE_LRG_RING_SZ;
8527 tw32(RCVDBDI_MODE, val);
8528 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8529 if (tg3_flag(tp, HW_TSO_1) ||
8530 tg3_flag(tp, HW_TSO_2) ||
8531 tg3_flag(tp, HW_TSO_3))
8532 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8533 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8534 if (tg3_flag(tp, ENABLE_TSS))
8535 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8536 tw32(SNDBDI_MODE, val);
8537 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8539 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8540 err = tg3_load_5701_a0_firmware_fix(tp);
8541 if (err)
8542 return err;
8543 }
8545 if (tg3_flag(tp, TSO_CAPABLE)) {
8546 err = tg3_load_tso_firmware(tp);
8547 if (err)
8548 return err;
8549 }
8551 tp->tx_mode = TX_MODE_ENABLE;
8553 if (tg3_flag(tp, 5755_PLUS) ||
8554 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8555 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8557 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8558 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8559 tp->tx_mode &= ~val;
8560 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8561 }
8563 tw32_f(MAC_TX_MODE, tp->tx_mode);
8564 udelay(100);
8566 if (tg3_flag(tp, ENABLE_RSS)) {
8567 u32 reg = MAC_RSS_INDIR_TBL_0;
8568 u8 *ent = (u8 *)&val;
8570 /* Setup the indirection table */
8571 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8572 int idx = i % sizeof(val);
8574 ent[idx] = i % (tp->irq_cnt - 1);
8575 if (idx == sizeof(val) - 1) {
8576 tw32(reg, val);
8577 reg += 4;
8578 }
8579 }
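/* The loop above fills the RSS indirection table one byte per entry,
 * packing four entries per 32-bit register write; entries are spread
 * round-robin over the rx rings (irq_cnt - 1 of them, since vector 0
 * is reserved for link/misc interrupts).
 */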
8581 /* Setup the "secret" hash key. */
8582 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8583 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8584 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8585 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8586 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8587 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8588 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8589 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8590 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8591 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8592 }
8594 tp->rx_mode = RX_MODE_ENABLE;
8595 if (tg3_flag(tp, 5755_PLUS))
8596 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8598 if (tg3_flag(tp, ENABLE_RSS))
8599 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8600 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8601 RX_MODE_RSS_IPV6_HASH_EN |
8602 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8603 RX_MODE_RSS_IPV4_HASH_EN |
8604 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8606 tw32_f(MAC_RX_MODE, tp->rx_mode);
8607 udelay(10);
8609 tw32(MAC_LED_CTRL, tp->led_ctrl);
8611 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8612 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8613 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8614 udelay(10);
8615 }
8616 tw32_f(MAC_RX_MODE, tp->rx_mode);
8617 udelay(10);
8619 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8620 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8621 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8622 /* Set drive transmission level to 1.2V */
8623 /* only if the signal pre-emphasis bit is not set */
8624 val = tr32(MAC_SERDES_CFG);
8625 val &= 0xfffff000;
8626 val |= 0x880;
8627 tw32(MAC_SERDES_CFG, val);
8628 }
8629 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8630 tw32(MAC_SERDES_CFG, 0x616000);
8631 }
8633 /* Prevent chip from dropping frames when flow control
8634 * is enabled.
8635 */
8636 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8637 val = 1;
8638 else
8639 val = 2;
8640 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8642 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8643 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8644 /* Use hardware link auto-negotiation */
8645 tg3_flag_set(tp, HW_AUTONEG);
8646 }
8648 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8649 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
8650 u32 tmp;
8652 tmp = tr32(SERDES_RX_CTRL);
8653 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8654 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8655 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8656 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8657 }
8659 if (!tg3_flag(tp, USE_PHYLIB)) {
8660 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8661 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8662 tp->link_config.speed = tp->link_config.orig_speed;
8663 tp->link_config.duplex = tp->link_config.orig_duplex;
8664 tp->link_config.autoneg = tp->link_config.orig_autoneg;
8665 }
8667 err = tg3_setup_phy(tp, 0);
8668 if (err)
8669 return err;
8671 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8672 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8673 u32 tmp;
8675 /* Clear CRC stats. */
8676 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8677 tg3_writephy(tp, MII_TG3_TEST1,
8678 tmp | MII_TG3_TEST1_CRC_EN);
8679 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8680 }
8681 }
8682 }
8684 __tg3_set_rx_mode(tp->dev);
8686 /* Initialize receive rules. */
8687 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8688 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8689 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8690 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8692 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8693 limit = 8;
8694 else
8695 limit = 16;
8696 if (tg3_flag(tp, ENABLE_ASF))
8697 limit -= 4;
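/* Every case below deliberately falls through to the next one, so
 * entering the switch at `limit` clears each higher-numbered receive
 * rule/value pair in turn.
 */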
8698 switch (limit) {
8699 case 16:
8700 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
8701 case 15:
8702 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
8703 case 14:
8704 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
8705 case 13:
8706 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
8707 case 12:
8708 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
8709 case 11:
8710 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
8711 case 10:
8712 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
8713 case 9:
8714 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
8715 case 8:
8716 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
8717 case 7:
8718 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
8719 case 6:
8720 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
8721 case 5:
8722 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
8723 case 4:
8724 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8725 case 3:
8726 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8727 case 2:
8728 case 1:
8730 default:
8731 break;
8732 }
8734 if (tg3_flag(tp, ENABLE_APE))
8735 /* Write our heartbeat update interval to APE. */
8736 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8737 APE_HOST_HEARTBEAT_INT_DISABLE);
8739 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8741 return 0;
8742 }
8744 /* Called at device open time to get the chip ready for
8745 * packet processing. Invoked with tp->lock held.
8746 */
8747 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8748 {
8749 tg3_switch_clocks(tp);
8751 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8753 return tg3_reset_hw(tp, reset_phy);
8754 }
8756 #define TG3_STAT_ADD32(PSTAT, REG) \
8757 do { u32 __val = tr32(REG); \
8758 (PSTAT)->low += __val; \
8759 if ((PSTAT)->low < __val) \
8760 (PSTAT)->high += 1; \
8761 } while (0)
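/* TG3_STAT_ADD32() folds a 32-bit hardware counter (presumably
 * clear-on-read, since the raw register value is added on every poll)
 * into a 64-bit {high, low} pair: if the low word wraps after the
 * addition, the unsigned compare detects the overflow and carries one
 * into the high word.
 */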
8763 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8764 {
8765 struct tg3_hw_stats *sp = tp->hw_stats;
8767 if (!netif_carrier_ok(tp->dev))
8768 return;
8770 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8771 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8772 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8773 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8774 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8775 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8776 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8777 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8778 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8779 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8780 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8781 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8782 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8784 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8785 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8786 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8787 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8788 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8789 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8790 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8791 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8792 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8793 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8794 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8795 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8796 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8797 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8799 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8800 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
8801 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
8802 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
8803 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8804 } else {
8805 u32 val = tr32(HOSTCC_FLOW_ATTN);
8806 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
8807 if (val) {
8808 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
8809 sp->rx_discards.low += val;
8810 if (sp->rx_discards.low < val)
8811 sp->rx_discards.high += 1;
8812 }
8813 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
8814 }
8815 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8816 }
8818 static void tg3_timer(unsigned long __opaque)
8819 {
8820 struct tg3 *tp = (struct tg3 *) __opaque;
8822 if (tp->irq_sync)
8823 goto restart_timer;
8825 spin_lock(&tp->lock);
8827 if (!tg3_flag(tp, TAGGED_STATUS)) {
8828 /* All of this garbage is because when using non-tagged
8829 * IRQ status the mailbox/status_block protocol the chip
8830 * uses with the cpu is race prone.
8831 */
8832 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8833 tw32(GRC_LOCAL_CTRL,
8834 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8835 } else {
8836 tw32(HOSTCC_MODE, tp->coalesce_mode |
8837 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
8838 }
8840 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8841 tg3_flag_set(tp, RESTART_TIMER);
8842 spin_unlock(&tp->lock);
8843 schedule_work(&tp->reset_task);
8844 return;
8845 }
8846 }
8848 /* This part only runs once per second. */
8849 if (!--tp->timer_counter) {
8850 if (tg3_flag(tp, 5705_PLUS))
8851 tg3_periodic_fetch_stats(tp);
8853 if (tp->setlpicnt && !--tp->setlpicnt)
8854 tg3_phy_eee_enable(tp);
8856 if (tg3_flag(tp, USE_LINKCHG_REG)) {
8857 u32 mac_stat;
8858 int phy_event;
8860 mac_stat = tr32(MAC_STATUS);
8862 phy_event = 0;
8863 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
8864 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8865 phy_event = 1;
8866 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8867 phy_event = 1;
8869 if (phy_event)
8870 tg3_setup_phy(tp, 0);
8871 } else if (tg3_flag(tp, POLL_SERDES)) {
8872 u32 mac_stat = tr32(MAC_STATUS);
8873 int need_setup = 0;
8875 if (netif_carrier_ok(tp->dev) &&
8876 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8877 need_setup = 1;
8878 }
8879 if (!netif_carrier_ok(tp->dev) &&
8880 (mac_stat & (MAC_STATUS_PCS_SYNCED |
8881 MAC_STATUS_SIGNAL_DET))) {
8882 need_setup = 1;
8883 }
8884 if (need_setup) {
8885 if (!tp->serdes_counter) {
8886 tw32_f(MAC_MODE,
8887 (tp->mac_mode &
8888 ~MAC_MODE_PORT_MODE_MASK));
8889 udelay(40);
8890 tw32_f(MAC_MODE, tp->mac_mode);
8891 udelay(40);
8892 }
8893 tg3_setup_phy(tp, 0);
8894 }
8895 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8896 tg3_flag(tp, 5780_CLASS)) {
8897 tg3_serdes_parallel_detect(tp);
8898 }
8900 tp->timer_counter = tp->timer_multiplier;
8901 }
8903 /* Heartbeat is only sent once every 2 seconds.
8905 * The heartbeat is to tell the ASF firmware that the host
8906 * driver is still alive. In the event that the OS crashes,
8907 * ASF needs to reset the hardware to free up the FIFO space
8908 * that may be filled with rx packets destined for the host.
8909 * If the FIFO is full, ASF will no longer function properly.
8911 * Unintended resets have been reported on real time kernels
8912 * where the timer doesn't run on time. Netpoll will also have
8913 * the same problem.
8915 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8916 * to check the ring condition when the heartbeat is expiring
8917 * before doing the reset. This will prevent most unintended
8918 * resets.
8919 */
8920 if (!--tp->asf_counter) {
8921 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
8922 tg3_wait_for_event_ack(tp);
8924 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8925 FWCMD_NICDRV_ALIVE3);
8926 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8927 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
8928 TG3_FW_UPDATE_TIMEOUT_SEC);
8930 tg3_generate_fw_event(tp);
8931 }
8932 tp->asf_counter = tp->asf_multiplier;
8933 }
8935 spin_unlock(&tp->lock);
8937 restart_timer:
8938 tp->timer.expires = jiffies + tp->timer_offset;
8939 add_timer(&tp->timer);
8940 }
8942 static int tg3_request_irq(struct tg3 *tp, int irq_num)
8943 {
8944 irq_handler_t fn;
8945 unsigned long flags;
8946 char *name;
8947 struct tg3_napi *tnapi = &tp->napi[irq_num];
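/* With multiple vectors, each IRQ gets a "<devname>-<index>" label
 * (e.g. "eth0-1" -- the name here is only illustrative) so the
 * vectors can be told apart in /proc/interrupts.
 */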
8949 if (tp->irq_cnt == 1)
8950 name = tp->dev->name;
8951 else {
8952 name = &tnapi->irq_lbl[0];
8953 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
8954 name[IFNAMSIZ-1] = 0;
8955 }
8957 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
8958 fn = tg3_msi;
8959 if (tg3_flag(tp, 1SHOT_MSI))
8960 fn = tg3_msi_1shot;
8961 flags = 0;
8962 } else {
8963 fn = tg3_interrupt;
8964 if (tg3_flag(tp, TAGGED_STATUS))
8965 fn = tg3_interrupt_tagged;
8966 flags = IRQF_SHARED;
8967 }
8969 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
8970 }
8972 static int tg3_test_interrupt(struct tg3 *tp)
8973 {
8974 struct tg3_napi *tnapi = &tp->napi[0];
8975 struct net_device *dev = tp->dev;
8976 int err, i, intr_ok = 0;
8977 u32 val;
8979 if (!netif_running(dev))
8980 return -ENODEV;
8982 tg3_disable_ints(tp);
8984 free_irq(tnapi->irq_vec, tnapi);
8986 /*
8987 * Turn off MSI one shot mode. Otherwise this test has no
8988 * observable way to know whether the interrupt was delivered.
8989 */
8990 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
8991 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
8992 tw32(MSGINT_MODE, val);
8993 }
8995 err = request_irq(tnapi->irq_vec, tg3_test_isr,
8996 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
8997 if (err)
8998 return err;
9000 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9001 tg3_enable_ints(tp);
9003 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9004 tnapi->coal_now);
9006 for (i = 0; i < 5; i++) {
9007 u32 int_mbox, misc_host_ctrl;
9009 int_mbox = tr32_mailbox(tnapi->int_mbox);
9010 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9012 if ((int_mbox != 0) ||
9013 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9014 intr_ok = 1;
9015 break;
9016 }
9018 msleep(10);
9019 }
9021 tg3_disable_ints(tp);
9023 free_irq(tnapi->irq_vec, tnapi);
9025 err = tg3_request_irq(tp, 0);
9027 if (err)
9028 return err;
9030 if (intr_ok) {
9031 /* Reenable MSI one shot mode. */
9032 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9033 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9034 tw32(MSGINT_MODE, val);
9035 }
9036 return 0;
9037 }
9039 return -EIO;
9040 }
9042 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
9043 * successfully restored
9044 */
9045 static int tg3_test_msi(struct tg3 *tp)
9046 {
9047 int err;
9048 u16 pci_cmd;
9050 if (!tg3_flag(tp, USING_MSI))
9051 return 0;
9053 /* Turn off SERR reporting in case MSI terminates with Master
9054 * Abort.
9055 */
9056 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9057 pci_write_config_word(tp->pdev, PCI_COMMAND,
9058 pci_cmd & ~PCI_COMMAND_SERR);
9060 err = tg3_test_interrupt(tp);
9062 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9064 if (!err)
9065 return 0;
9067 /* other failures */
9068 if (err != -EIO)
9069 return err;
9071 /* MSI test failed, go back to INTx mode */
9072 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9073 "to INTx mode. Please report this failure to the PCI "
9074 "maintainer and include system chipset information\n");
9076 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9078 pci_disable_msi(tp->pdev);
9080 tg3_flag_clear(tp, USING_MSI);
9081 tp->napi[0].irq_vec = tp->pdev->irq;
9083 err = tg3_request_irq(tp, 0);
9084 if (err)
9085 return err;
9087 /* Need to reset the chip because the MSI cycle may have terminated
9088 * with Master Abort.
9089 */
9090 tg3_full_lock(tp, 1);
9092 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9093 err = tg3_init_hw(tp, 1);
9095 tg3_full_unlock(tp);
9097 if (err)
9098 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9100 return err;
9101 }
9103 static int tg3_request_firmware(struct tg3 *tp)
9104 {
9105 const __be32 *fw_data;
9107 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9108 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9109 tp->fw_needed);
9110 return -ENOENT;
9111 }
9113 fw_data = (void *)tp->fw->data;
9115 /* Firmware blob starts with version numbers, followed by
9116 * start address and _full_ length including BSS sections
9117 * (which must be longer than the actual data, of course
9118 */
9120 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
9121 if (tp->fw_len < (tp->fw->size - 12)) {
9122 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9123 tp->fw_len, tp->fw_needed);
9124 release_firmware(tp->fw);
9125 tp->fw = NULL;
9126 return -EINVAL;
9127 }
9129 /* We no longer need firmware; we have it. */
9130 tp->fw_needed = NULL;
9131 return 0;
9132 }
9134 static bool tg3_enable_msix(struct tg3 *tp)
9135 {
9136 int i, rc, cpus = num_online_cpus();
9137 struct msix_entry msix_ent[tp->irq_max];
9139 if (cpus == 1)
9140 /* Just fallback to the simpler MSI mode. */
9141 return false;
9143 /*
9144 * We want as many rx rings enabled as there are cpus.
9145 * The first MSIX vector only deals with link interrupts, etc,
9146 * so we add one to the number of vectors we are requesting.
9147 */
9148 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9150 for (i = 0; i < tp->irq_max; i++) {
9151 msix_ent[i].entry = i;
9152 msix_ent[i].vector = 0;
9153 }
9155 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9156 if (rc < 0) {
9157 return false;
9158 } else if (rc != 0) {
9159 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9160 return false;
9161 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9162 tp->irq_cnt, rc);
9163 tp->irq_cnt = rc;
9164 }
9166 for (i = 0; i < tp->irq_max; i++)
9167 tp->napi[i].irq_vec = msix_ent[i].vector;
9169 netif_set_real_num_tx_queues(tp->dev, 1);
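/* Vector 0 only services link/misc interrupts, so the number of rx
 * queues exposed to the stack is irq_cnt - 1 (or 1 when only a single
 * vector was granted).
 */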
9170 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9171 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9172 pci_disable_msix(tp->pdev);
9173 return false;
9174 }
9176 if (tp->irq_cnt > 1) {
9177 tg3_flag_set(tp, ENABLE_RSS);
9179 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9180 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9181 tg3_flag_set(tp, ENABLE_TSS);
9182 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9183 }
9184 }
9186 return true;
9187 }
9189 static void tg3_ints_init(struct tg3 *tp)
9190 {
9191 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9192 !tg3_flag(tp, TAGGED_STATUS)) {
9193 /* All MSI supporting chips should support tagged
9194 * status. Assert that this is the case.
9195 */
9196 netdev_warn(tp->dev,
9197 "MSI without TAGGED_STATUS? Not using MSI\n");
9198 goto defcfg;
9199 }
9201 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9202 tg3_flag_set(tp, USING_MSIX);
9203 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9204 tg3_flag_set(tp, USING_MSI);
9206 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9207 u32 msi_mode = tr32(MSGINT_MODE);
9208 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9209 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9210 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9211 }
9212 defcfg:
9213 if (!tg3_flag(tp, USING_MSIX)) {
9214 tp->irq_cnt = 1;
9215 tp->napi[0].irq_vec = tp->pdev->irq;
9216 netif_set_real_num_tx_queues(tp->dev, 1);
9217 netif_set_real_num_rx_queues(tp->dev, 1);
9218 }
9219 }
9221 static void tg3_ints_fini(struct tg3 *tp)
9222 {
9223 if (tg3_flag(tp, USING_MSIX))
9224 pci_disable_msix(tp->pdev);
9225 else if (tg3_flag(tp, USING_MSI))
9226 pci_disable_msi(tp->pdev);
9227 tg3_flag_clear(tp, USING_MSI);
9228 tg3_flag_clear(tp, USING_MSIX);
9229 tg3_flag_clear(tp, ENABLE_RSS);
9230 tg3_flag_clear(tp, ENABLE_TSS);
9231 }
9233 static int tg3_open(struct net_device *dev)
9234 {
9235 struct tg3 *tp = netdev_priv(dev);
9236 int i, err;
9238 if (tp->fw_needed) {
9239 err = tg3_request_firmware(tp);
9240 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9241 if (err)
9242 return err;
9243 } else if (err) {
9244 netdev_warn(tp->dev, "TSO capability disabled\n");
9245 tg3_flag_clear(tp, TSO_CAPABLE);
9246 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9247 netdev_notice(tp->dev, "TSO capability restored\n");
9248 tg3_flag_set(tp, TSO_CAPABLE);
9249 }
9250 }
9252 netif_carrier_off(tp->dev);
9254 err = tg3_power_up(tp);
9255 if (err)
9256 return err;
9258 tg3_full_lock(tp, 0);
9260 tg3_disable_ints(tp);
9261 tg3_flag_clear(tp, INIT_COMPLETE);
9263 tg3_full_unlock(tp);
9265 /*
9266 * Setup interrupts first so we know how
9267 * many NAPI resources to allocate
9268 */
9269 tg3_ints_init(tp);
9271 /* The placement of this call is tied
9272 * to the setup and use of Host TX descriptors.
9273 */
9274 err = tg3_alloc_consistent(tp);
9275 if (err)
9276 goto err_out1;
9278 tg3_napi_init(tp);
9280 tg3_napi_enable(tp);
9282 for (i = 0; i < tp->irq_cnt; i++) {
9283 struct tg3_napi *tnapi = &tp->napi[i];
9284 err = tg3_request_irq(tp, i);
9285 if (err) {
9286 for (i--; i >= 0; i--)
9287 free_irq(tnapi->irq_vec, tnapi);
9288 break;
9289 }
9290 }
9292 if (err)
9293 goto err_out2;
9295 tg3_full_lock(tp, 0);
9297 err = tg3_init_hw(tp, 1);
9298 if (err) {
9299 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9300 tg3_free_rings(tp);
9301 } else {
9302 if (tg3_flag(tp, TAGGED_STATUS))
9303 tp->timer_offset = HZ;
9304 else
9305 tp->timer_offset = HZ / 10;
9307 BUG_ON(tp->timer_offset > HZ);
9308 tp->timer_counter = tp->timer_multiplier =
9309 (HZ / tp->timer_offset);
9310 tp->asf_counter = tp->asf_multiplier =
9311 ((HZ / tp->timer_offset) * 2);
9313 init_timer(&tp->timer);
9314 tp->timer.expires = jiffies + tp->timer_offset;
9315 tp->timer.data = (unsigned long) tp;
9316 tp->timer.function = tg3_timer;
9317 }
9319 tg3_full_unlock(tp);
9321 if (err)
9322 goto err_out3;
9324 if (tg3_flag(tp, USING_MSI)) {
9325 err = tg3_test_msi(tp);
9327 if (err) {
9328 tg3_full_lock(tp, 0);
9329 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9330 tg3_free_rings(tp);
9331 tg3_full_unlock(tp);
9333 goto err_out2;
9334 }
9336 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9337 u32 val = tr32(PCIE_TRANSACTION_CFG);
9339 tw32(PCIE_TRANSACTION_CFG,
9340 val | PCIE_TRANS_CFG_1SHOT_MSI);
9341 }
9342 }
9344 tg3_phy_start(tp);
9346 tg3_full_lock(tp, 0);
9348 add_timer(&tp->timer);
9349 tg3_flag_set(tp, INIT_COMPLETE);
9350 tg3_enable_ints(tp);
9352 tg3_full_unlock(tp);
9354 netif_tx_start_all_queues(dev);
9356 /*
9357 * Reset loopback feature if it was turned on while the device was down;
9358 * make sure that it's installed properly now.
9359 */
9360 if (dev->features & NETIF_F_LOOPBACK)
9361 tg3_set_loopback(dev, dev->features);
9363 return 0;
9365 err_out3:
9366 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9367 struct tg3_napi *tnapi = &tp->napi[i];
9368 free_irq(tnapi->irq_vec, tnapi);
9369 }
9371 err_out2:
9372 tg3_napi_disable(tp);
9373 tg3_napi_fini(tp);
9374 tg3_free_consistent(tp);
9376 err_out1:
9377 tg3_ints_fini(tp);
9378 return err;
9379 }
9381 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9382 struct rtnl_link_stats64 *);
9383 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9385 static int tg3_close(struct net_device *dev)
9386 {
9387 int i;
9388 struct tg3 *tp = netdev_priv(dev);
9390 tg3_napi_disable(tp);
9391 cancel_work_sync(&tp->reset_task);
9393 netif_tx_stop_all_queues(dev);
9395 del_timer_sync(&tp->timer);
9397 tg3_phy_stop(tp);
9399 tg3_full_lock(tp, 1);
9401 tg3_disable_ints(tp);
9403 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9404 tg3_free_rings(tp);
9405 tg3_flag_clear(tp, INIT_COMPLETE);
9407 tg3_full_unlock(tp);
9409 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9410 struct tg3_napi *tnapi = &tp->napi[i];
9411 free_irq(tnapi->irq_vec, tnapi);
9412 }
9414 tg3_ints_fini(tp);
9416 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9418 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9419 sizeof(tp->estats_prev));
9421 tg3_napi_fini(tp);
9423 tg3_free_consistent(tp);
9425 tg3_power_down(tp);
9427 netif_carrier_off(tp->dev);
9429 return 0;
9430 }
9432 static inline u64 get_stat64(tg3_stat64_t *val)
9433 {
9434 return ((u64)val->high << 32) | ((u64)val->low);
9435 }
9437 static u64 calc_crc_errors(struct tg3 *tp)
9438 {
9439 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9441 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9442 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9443 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9444 u32 val;
9446 spin_lock_bh(&tp->lock);
9447 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9448 tg3_writephy(tp, MII_TG3_TEST1,
9449 val | MII_TG3_TEST1_CRC_EN);
9450 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9451 } else
9452 val = 0;
9453 spin_unlock_bh(&tp->lock);
9455 tp->phy_crc_errors += val;
9457 return tp->phy_crc_errors;
9458 }
9460 return get_stat64(&hw_stats->rx_fcs_errors);
9461 }
9463 #define ESTAT_ADD(member) \
9464 estats->member = old_estats->member + \
9465 get_stat64(&hw_stats->member)
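/* ESTAT_ADD() rebases each counter on the snapshot saved to
 * estats_prev in tg3_close(), so the ethtool statistics keep
 * accumulating across down/up cycles of the device.
 */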
9467 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9468 {
9469 struct tg3_ethtool_stats *estats = &tp->estats;
9470 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9471 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9473 if (!hw_stats)
9474 return old_estats;
9476 ESTAT_ADD(rx_octets);
9477 ESTAT_ADD(rx_fragments);
9478 ESTAT_ADD(rx_ucast_packets);
9479 ESTAT_ADD(rx_mcast_packets);
9480 ESTAT_ADD(rx_bcast_packets);
9481 ESTAT_ADD(rx_fcs_errors);
9482 ESTAT_ADD(rx_align_errors);
9483 ESTAT_ADD(rx_xon_pause_rcvd);
9484 ESTAT_ADD(rx_xoff_pause_rcvd);
9485 ESTAT_ADD(rx_mac_ctrl_rcvd);
9486 ESTAT_ADD(rx_xoff_entered);
9487 ESTAT_ADD(rx_frame_too_long_errors);
9488 ESTAT_ADD(rx_jabbers);
9489 ESTAT_ADD(rx_undersize_packets);
9490 ESTAT_ADD(rx_in_length_errors);
9491 ESTAT_ADD(rx_out_length_errors);
9492 ESTAT_ADD(rx_64_or_less_octet_packets);
9493 ESTAT_ADD(rx_65_to_127_octet_packets);
9494 ESTAT_ADD(rx_128_to_255_octet_packets);
9495 ESTAT_ADD(rx_256_to_511_octet_packets);
9496 ESTAT_ADD(rx_512_to_1023_octet_packets);
9497 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9498 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9499 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9500 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9501 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9503 ESTAT_ADD(tx_octets);
9504 ESTAT_ADD(tx_collisions);
9505 ESTAT_ADD(tx_xon_sent);
9506 ESTAT_ADD(tx_xoff_sent);
9507 ESTAT_ADD(tx_flow_control);
9508 ESTAT_ADD(tx_mac_errors);
9509 ESTAT_ADD(tx_single_collisions);
9510 ESTAT_ADD(tx_mult_collisions);
9511 ESTAT_ADD(tx_deferred);
9512 ESTAT_ADD(tx_excessive_collisions);
9513 ESTAT_ADD(tx_late_collisions);
9514 ESTAT_ADD(tx_collide_2times);
9515 ESTAT_ADD(tx_collide_3times);
9516 ESTAT_ADD(tx_collide_4times);
9517 ESTAT_ADD(tx_collide_5times);
9518 ESTAT_ADD(tx_collide_6times);
9519 ESTAT_ADD(tx_collide_7times);
9520 ESTAT_ADD(tx_collide_8times);
9521 ESTAT_ADD(tx_collide_9times);
9522 ESTAT_ADD(tx_collide_10times);
9523 ESTAT_ADD(tx_collide_11times);
9524 ESTAT_ADD(tx_collide_12times);
9525 ESTAT_ADD(tx_collide_13times);
9526 ESTAT_ADD(tx_collide_14times);
9527 ESTAT_ADD(tx_collide_15times);
9528 ESTAT_ADD(tx_ucast_packets);
9529 ESTAT_ADD(tx_mcast_packets);
9530 ESTAT_ADD(tx_bcast_packets);
9531 ESTAT_ADD(tx_carrier_sense_errors);
9532 ESTAT_ADD(tx_discards);
9533 ESTAT_ADD(tx_errors);
9535 ESTAT_ADD(dma_writeq_full);
9536 ESTAT_ADD(dma_write_prioq_full);
9537 ESTAT_ADD(rxbds_empty);
9538 ESTAT_ADD(rx_discards);
9539 ESTAT_ADD(rx_errors);
9540 ESTAT_ADD(rx_threshold_hit);
9542 ESTAT_ADD(dma_readq_full);
9543 ESTAT_ADD(dma_read_prioq_full);
9544 ESTAT_ADD(tx_comp_queue_full);
9546 ESTAT_ADD(ring_set_send_prod_index);
9547 ESTAT_ADD(ring_status_update);
9548 ESTAT_ADD(nic_irqs);
9549 ESTAT_ADD(nic_avoided_irqs);
9550 ESTAT_ADD(nic_tx_threshold_hit);
9552 ESTAT_ADD(mbuf_lwm_thresh_hit);
9554 return estats;
9555 }
9557 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9558 struct rtnl_link_stats64 *stats)
9559 {
9560 struct tg3 *tp = netdev_priv(dev);
9561 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9562 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9564 if (!hw_stats)
9565 return old_stats;
9567 stats->rx_packets = old_stats->rx_packets +
9568 get_stat64(&hw_stats->rx_ucast_packets) +
9569 get_stat64(&hw_stats->rx_mcast_packets) +
9570 get_stat64(&hw_stats->rx_bcast_packets);
9572 stats->tx_packets = old_stats->tx_packets +
9573 get_stat64(&hw_stats->tx_ucast_packets) +
9574 get_stat64(&hw_stats->tx_mcast_packets) +
9575 get_stat64(&hw_stats->tx_bcast_packets);
9577 stats->rx_bytes = old_stats->rx_bytes +
9578 get_stat64(&hw_stats->rx_octets);
9579 stats->tx_bytes = old_stats->tx_bytes +
9580 get_stat64(&hw_stats->tx_octets);
9582 stats->rx_errors = old_stats->rx_errors +
9583 get_stat64(&hw_stats->rx_errors);
9584 stats->tx_errors = old_stats->tx_errors +
9585 get_stat64(&hw_stats->tx_errors) +
9586 get_stat64(&hw_stats->tx_mac_errors) +
9587 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9588 get_stat64(&hw_stats->tx_discards);
9590 stats->multicast = old_stats->multicast +
9591 get_stat64(&hw_stats->rx_mcast_packets);
9592 stats->collisions = old_stats->collisions +
9593 get_stat64(&hw_stats->tx_collisions);
9595 stats->rx_length_errors = old_stats->rx_length_errors +
9596 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9597 get_stat64(&hw_stats->rx_undersize_packets);
9599 stats->rx_over_errors = old_stats->rx_over_errors +
9600 get_stat64(&hw_stats->rxbds_empty);
9601 stats->rx_frame_errors = old_stats->rx_frame_errors +
9602 get_stat64(&hw_stats->rx_align_errors);
9603 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9604 get_stat64(&hw_stats->tx_discards);
9605 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9606 get_stat64(&hw_stats->tx_carrier_sense_errors);
9608 stats->rx_crc_errors = old_stats->rx_crc_errors +
9609 calc_crc_errors(tp);
9611 stats->rx_missed_errors = old_stats->rx_missed_errors +
9612 get_stat64(&hw_stats->rx_discards);
9614 stats->rx_dropped = tp->rx_dropped;
9616 return stats;
9617 }
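/* calc_crc() below is the standard bit-reflected CRC-32 (polynomial
 * 0xedb88320) that Ethernet uses for the FCS; the multicast hash
 * filter only consumes a few bits of the result.
 */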
9619 static inline u32 calc_crc(unsigned char *buf, int len)
9620 {
9621 u32 reg;
9622 u32 tmp;
9623 int j, k;
9625 reg = 0xffffffff;
9627 for (j = 0; j < len; j++) {
9628 reg ^= buf[j];
9630 for (k = 0; k < 8; k++) {
9631 tmp = reg & 0x01;
9633 reg >>= 1;
9635 if (tmp)
9636 reg ^= 0xedb88320;
9637 }
9638 }
9640 return ~reg;
9641 }
9643 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9644 {
9645 /* accept or reject all multicast frames */
9646 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9647 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9648 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9649 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9650 }
9652 static void __tg3_set_rx_mode(struct net_device *dev)
9653 {
9654 struct tg3 *tp = netdev_priv(dev);
9655 u32 rx_mode;
9657 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9658 RX_MODE_KEEP_VLAN_TAG);
9660 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9661 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9662 * flag clear.
9663 */
9664 if (!tg3_flag(tp, ENABLE_ASF))
9665 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9666 #endif
9668 if (dev->flags & IFF_PROMISC) {
9669 /* Promiscuous mode. */
9670 rx_mode |= RX_MODE_PROMISC;
9671 } else if (dev->flags & IFF_ALLMULTI) {
9672 /* Accept all multicast. */
9673 tg3_set_multi(tp, 1);
9674 } else if (netdev_mc_empty(dev)) {
9675 /* Reject all multicast. */
9676 tg3_set_multi(tp, 0);
9677 } else {
9678 /* Accept one or more multicast(s). */
9679 struct netdev_hw_addr *ha;
9680 u32 mc_filter[4] = { 0, };
9681 u32 regidx;
9682 u32 bit;
9683 u32 crc;
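/* Hash scheme: take the low 7 bits of the inverted CRC of each MAC
 * address; bits 6:5 pick one of the four 32-bit MAC_HASH registers
 * and bits 4:0 pick the bit within it, giving a 128-bit imperfect
 * multicast filter.
 */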
9685 netdev_for_each_mc_addr(ha, dev) {
9686 crc = calc_crc(ha->addr, ETH_ALEN);
9687 bit = ~crc & 0x7f;
9688 regidx = (bit & 0x60) >> 5;
9689 bit &= 0x1f;
9690 mc_filter[regidx] |= (1 << bit);
9691 }
9693 tw32(MAC_HASH_REG_0, mc_filter[0]);
9694 tw32(MAC_HASH_REG_1, mc_filter[1]);
9695 tw32(MAC_HASH_REG_2, mc_filter[2]);
9696 tw32(MAC_HASH_REG_3, mc_filter[3]);
9697 }
9699 if (rx_mode != tp->rx_mode) {
9700 tp->rx_mode = rx_mode;
9701 tw32_f(MAC_RX_MODE, rx_mode);
9702 udelay(10);
9703 }
9704 }
9706 static void tg3_set_rx_mode(struct net_device *dev)
9707 {
9708 struct tg3 *tp = netdev_priv(dev);
9710 if (!netif_running(dev))
9711 return;
9713 tg3_full_lock(tp, 0);
9714 __tg3_set_rx_mode(dev);
9715 tg3_full_unlock(tp);
9716 }
9718 static int tg3_get_regs_len(struct net_device *dev)
9719 {
9720 return TG3_REG_BLK_SIZE;
9721 }
9723 static void tg3_get_regs(struct net_device *dev,
9724 struct ethtool_regs *regs, void *_p)
9725 {
9726 struct tg3 *tp = netdev_priv(dev);
9728 regs->version = 0;
9730 memset(_p, 0, TG3_REG_BLK_SIZE);
9732 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9733 return;
9735 tg3_full_lock(tp, 0);
9737 tg3_dump_legacy_regs(tp, (u32 *)_p);
9739 tg3_full_unlock(tp);
9740 }
9742 static int tg3_get_eeprom_len(struct net_device *dev)
9743 {
9744 struct tg3 *tp = netdev_priv(dev);
9746 return tp->nvram_size;
9747 }
9749 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9750 {
9751 struct tg3 *tp = netdev_priv(dev);
9752 int ret;
9753 u8 *pd;
9754 u32 i, offset, len, b_offset, b_count;
9755 __be32 val;
9757 if (tg3_flag(tp, NO_NVRAM))
9758 return -EINVAL;
9760 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9761 return -EAGAIN;
9763 offset = eeprom->offset;
9764 len = eeprom->len;
9765 eeprom->len = 0;
9767 eeprom->magic = TG3_EEPROM_MAGIC;
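/* NVRAM is read in aligned 32-bit words, so an unaligned request is
 * split into a partial leading word here, whole words in the loop
 * below, and a partial trailing word at the end.
 */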
9769 if (offset & 3) {
9770 /* adjustments to start on required 4 byte boundary */
9771 b_offset = offset & 3;
9772 b_count = 4 - b_offset;
9773 if (b_count > len) {
9774 /* i.e. offset=1 len=2 */
9775 b_count = len;
9776 }
9777 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9778 if (ret)
9779 return ret;
9780 memcpy(data, ((char *)&val) + b_offset, b_count);
9781 len -= b_count;
9782 offset += b_count;
9783 eeprom->len += b_count;
9784 }
9786 /* read bytes up to the last 4 byte boundary */
9787 pd = &data[eeprom->len];
9788 for (i = 0; i < (len - (len & 3)); i += 4) {
9789 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9790 if (ret) {
9791 eeprom->len += i;
9792 return ret;
9793 }
9794 memcpy(pd + i, &val, 4);
9795 }
9796 eeprom->len += i;
9798 if (len & 3) {
9799 /* read last bytes not ending on 4 byte boundary */
9800 pd = &data[eeprom->len];
9801 b_count = len & 3;
9802 b_offset = offset + len - b_count;
9803 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9804 if (ret)
9805 return ret;
9806 memcpy(pd, &val, b_count);
9807 eeprom->len += b_count;
9808 }
9809 return 0;
9810 }
9812 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9814 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9815 {
9816 struct tg3 *tp = netdev_priv(dev);
9817 int ret;
9818 u32 offset, len, b_offset, odd_len;
9819 u8 *buf;
9820 __be32 start, end;
9822 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9823 return -EAGAIN;
9825 if (tg3_flag(tp, NO_NVRAM) ||
9826 eeprom->magic != TG3_EEPROM_MAGIC)
9827 return -EINVAL;
9829 offset = eeprom->offset;
9830 len = eeprom->len;
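/* NVRAM writes must also be whole, aligned 32-bit words, so unaligned
 * head/tail bytes are handled read-modify-write: the bordering words
 * are read first and merged with the caller's data before the block
 * write.
 */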
9832 if ((b_offset = (offset & 3))) {
9833 /* adjustments to start on required 4 byte boundary */
9834 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9835 if (ret)
9836 return ret;
9837 len += b_offset;
9838 offset &= ~3;
9839 if (len < 4)
9840 len = 4;
9841 }
9843 odd_len = 0;
9844 if (len & 3) {
9845 /* adjustments to end on required 4 byte boundary */
9846 odd_len = 1;
9847 len = (len + 3) & ~3;
9848 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9849 if (ret)
9850 return ret;
9851 }
9853 buf = data;
9854 if (b_offset || odd_len) {
9855 buf = kmalloc(len, GFP_KERNEL);
9856 if (!buf)
9857 return -ENOMEM;
9858 if (b_offset)
9859 memcpy(buf, &start, 4);
9860 if (odd_len)
9861 memcpy(buf+len-4, &end, 4);
9862 memcpy(buf + b_offset, data, eeprom->len);
9863 }
9865 ret = tg3_nvram_write_block(tp, offset, len, buf);
9867 if (buf != data)
9868 kfree(buf);
9870 return ret;
9871 }
9873 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9874 {
9875 struct tg3 *tp = netdev_priv(dev);
9877 if (tg3_flag(tp, USE_PHYLIB)) {
9878 struct phy_device *phydev;
9879 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9880 return -EAGAIN;
9881 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9882 return phy_ethtool_gset(phydev, cmd);
9883 }
9885 cmd->supported = (SUPPORTED_Autoneg);
9887 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9888 cmd->supported |= (SUPPORTED_1000baseT_Half |
9889 SUPPORTED_1000baseT_Full);
9891 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
9892 cmd->supported |= (SUPPORTED_100baseT_Half |
9893 SUPPORTED_100baseT_Full |
9894 SUPPORTED_10baseT_Half |
9895 SUPPORTED_10baseT_Full |
9896 SUPPORTED_TP);
9897 cmd->port = PORT_TP;
9898 } else {
9899 cmd->supported |= SUPPORTED_FIBRE;
9900 cmd->port = PORT_FIBRE;
9901 }
9903 cmd->advertising = tp->link_config.advertising;
9904 if (netif_running(dev)) {
9905 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
9906 cmd->duplex = tp->link_config.active_duplex;
9907 } else {
9908 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
9909 cmd->duplex = DUPLEX_INVALID;
9910 }
9911 cmd->phy_address = tp->phy_addr;
9912 cmd->transceiver = XCVR_INTERNAL;
9913 cmd->autoneg = tp->link_config.autoneg;
9914 cmd->maxtxpkt = 0;
9915 cmd->maxrxpkt = 0;
9916 return 0;
9917 }
9919 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9920 {
9921 struct tg3 *tp = netdev_priv(dev);
9922 u32 speed = ethtool_cmd_speed(cmd);
9924 if (tg3_flag(tp, USE_PHYLIB)) {
9925 struct phy_device *phydev;
9926 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9927 return -EAGAIN;
9928 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9929 return phy_ethtool_sset(phydev, cmd);
9930 }
9932 if (cmd->autoneg != AUTONEG_ENABLE &&
9933 cmd->autoneg != AUTONEG_DISABLE)
9934 return -EINVAL;
9936 if (cmd->autoneg == AUTONEG_DISABLE &&
9937 cmd->duplex != DUPLEX_FULL &&
9938 cmd->duplex != DUPLEX_HALF)
9939 return -EINVAL;
9941 if (cmd->autoneg == AUTONEG_ENABLE) {
9942 u32 mask = ADVERTISED_Autoneg |
9943 ADVERTISED_Pause |
9944 ADVERTISED_Asym_Pause;
9946 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9947 mask |= ADVERTISED_1000baseT_Half |
9948 ADVERTISED_1000baseT_Full;
9950 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
9951 mask |= ADVERTISED_100baseT_Half |
9952 ADVERTISED_100baseT_Full |
9953 ADVERTISED_10baseT_Half |
9954 ADVERTISED_10baseT_Full |
9955 ADVERTISED_TP;
9956 else
9957 mask |= ADVERTISED_FIBRE;
9959 if (cmd->advertising & ~mask)
9960 return -EINVAL;
9962 mask &= (ADVERTISED_1000baseT_Half |
9963 ADVERTISED_1000baseT_Full |
9964 ADVERTISED_100baseT_Half |
9965 ADVERTISED_100baseT_Full |
9966 ADVERTISED_10baseT_Half |
9967 ADVERTISED_10baseT_Full);
9969 cmd->advertising &= mask;
9970 } else {
9971 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
9972 if (speed != SPEED_1000)
9973 return -EINVAL;
9975 if (cmd->duplex != DUPLEX_FULL)
9976 return -EINVAL;
9977 } else {
9978 if (speed != SPEED_100 &&
9979 speed != SPEED_10)
9980 return -EINVAL;
9981 }
9982 }
9984 tg3_full_lock(tp, 0);
9986 tp->link_config.autoneg = cmd->autoneg;
9987 if (cmd->autoneg == AUTONEG_ENABLE) {
9988 tp->link_config.advertising = (cmd->advertising |
9989 ADVERTISED_Autoneg);
9990 tp->link_config.speed = SPEED_INVALID;
9991 tp->link_config.duplex = DUPLEX_INVALID;
9992 } else {
9993 tp->link_config.advertising = 0;
9994 tp->link_config.speed = speed;
9995 tp->link_config.duplex = cmd->duplex;
9996 }
9998 tp->link_config.orig_speed = tp->link_config.speed;
9999 tp->link_config.orig_duplex = tp->link_config.duplex;
10000 tp->link_config.orig_autoneg = tp->link_config.autoneg;
10002 if (netif_running(dev))
10003 tg3_setup_phy(tp, 1);
10005 tg3_full_unlock(tp);
10007 return 0;
10008 }
10010 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10011 {
10012 struct tg3 *tp = netdev_priv(dev);
10014 strcpy(info->driver, DRV_MODULE_NAME);
10015 strcpy(info->version, DRV_MODULE_VERSION);
10016 strcpy(info->fw_version, tp->fw_ver);
10017 strcpy(info->bus_info, pci_name(tp->pdev));
10018 }
10020 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10021 {
10022 struct tg3 *tp = netdev_priv(dev);
10024 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10025 wol->supported = WAKE_MAGIC;
10026 else
10027 wol->supported = 0;
10028 wol->wolopts = 0;
10029 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10030 wol->wolopts = WAKE_MAGIC;
10031 memset(&wol->sopass, 0, sizeof(wol->sopass));
10032 }
10034 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10035 {
10036 struct tg3 *tp = netdev_priv(dev);
10037 struct device *dp = &tp->pdev->dev;
10039 if (wol->wolopts & ~WAKE_MAGIC)
10040 return -EINVAL;
10041 if ((wol->wolopts & WAKE_MAGIC) &&
10042 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10043 return -EINVAL;
10045 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10047 spin_lock_bh(&tp->lock);
10048 if (device_may_wakeup(dp))
10049 tg3_flag_set(tp, WOL_ENABLE);
10050 else
10051 tg3_flag_clear(tp, WOL_ENABLE);
10052 spin_unlock_bh(&tp->lock);
10054 return 0;
10055 }
10057 static u32 tg3_get_msglevel(struct net_device *dev)
10058 {
10059 struct tg3 *tp = netdev_priv(dev);
10060 return tp->msg_enable;
10061 }
10063 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10064 {
10065 struct tg3 *tp = netdev_priv(dev);
10066 tp->msg_enable = value;
10067 }
10069 static int tg3_nway_reset(struct net_device *dev)
10070 {
10071 struct tg3 *tp = netdev_priv(dev);
10072 int r;
10074 if (!netif_running(dev))
10075 return -EAGAIN;
10077 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10078 return -EINVAL;
10080 if (tg3_flag(tp, USE_PHYLIB)) {
10081 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10082 return -EAGAIN;
10083 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10084 } else {
10085 u32 bmcr;
10087 spin_lock_bh(&tp->lock);
10088 r = -EINVAL;
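/* The first BMCR read below is discarded; presumably a dummy read to
 * flush a stale latched value before the read that is actually
 * tested.
 */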
10089 tg3_readphy(tp, MII_BMCR, &bmcr);
10090 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10091 ((bmcr & BMCR_ANENABLE) ||
10092 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10093 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10094 BMCR_ANENABLE);
10095 r = 0;
10096 }
10097 spin_unlock_bh(&tp->lock);
10098 }
10100 return r;
10101 }
10103 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10104 {
10105 struct tg3 *tp = netdev_priv(dev);
10107 ering->rx_max_pending = tp->rx_std_ring_mask;
10108 ering->rx_mini_max_pending = 0;
10109 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10110 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10111 else
10112 ering->rx_jumbo_max_pending = 0;
10114 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10116 ering->rx_pending = tp->rx_pending;
10117 ering->rx_mini_pending = 0;
10118 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10119 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10120 else
10121 ering->rx_jumbo_pending = 0;
10123 ering->tx_pending = tp->napi[0].tx_pending;
10124 }
10126 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10127 {
10128 struct tg3 *tp = netdev_priv(dev);
10129 int i, irq_sync = 0, err = 0;
10131 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10132 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10133 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10134 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10135 (tg3_flag(tp, TSO_BUG) &&
10136 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10137 return -EINVAL;
10139 if (netif_running(dev)) {
10140 tg3_phy_stop(tp);
10141 tg3_netif_stop(tp);
10142 irq_sync = 1;
10143 }
10145 tg3_full_lock(tp, irq_sync);
10147 tp->rx_pending = ering->rx_pending;
10149 if (tg3_flag(tp, MAX_RXPEND_64) &&
10150 tp->rx_pending > 63)
10151 tp->rx_pending = 63;
10152 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10154 for (i = 0; i < tp->irq_max; i++)
10155 tp->napi[i].tx_pending = ering->tx_pending;
10157 if (netif_running(dev)) {
10158 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10159 err = tg3_restart_hw(tp, 1);
10160 if (!err)
10161 tg3_netif_start(tp);
10162 }
10164 tg3_full_unlock(tp);
10166 if (irq_sync && !err)
10167 tg3_phy_start(tp);
10169 return err;
10170 }
10172 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10173 {
10174 struct tg3 *tp = netdev_priv(dev);
10176 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10178 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10179 epause->rx_pause = 1;
10180 else
10181 epause->rx_pause = 0;
10183 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10184 epause->tx_pause = 1;
10185 else
10186 epause->tx_pause = 0;
10187 }
10189 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10190 {
10191 struct tg3 *tp = netdev_priv(dev);
10192 int err = 0;
10194 if (tg3_flag(tp, USE_PHYLIB)) {
10195 u32 newadv;
10196 struct phy_device *phydev;
10198 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10200 if (!(phydev->supported & SUPPORTED_Pause) ||
10201 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10202 (epause->rx_pause != epause->tx_pause)))
10203 return -EINVAL;
10205 tp->link_config.flowctrl = 0;
10206 if (epause->rx_pause) {
10207 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10209 if (epause->tx_pause) {
10210 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10211 newadv = ADVERTISED_Pause;
10212 } else
10213 newadv = ADVERTISED_Pause |
10214 ADVERTISED_Asym_Pause;
10215 } else if (epause->tx_pause) {
10216 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10217 newadv = ADVERTISED_Asym_Pause;
10218 } else
10219 newadv = 0;
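/*
 * The mapping above is the standard 802.3 pause-advertisement encoding
 * (desired rx/tx pause -> Pause/Asym_Pause bits):
 *
 *   rx + tx  ->  Pause
 *   rx only  ->  Pause | Asym_Pause
 *   tx only  ->  Asym_Pause
 *   neither  ->  nothing advertised
 */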
10221 if (epause->autoneg)
10222 tg3_flag_set(tp, PAUSE_AUTONEG);
10223 else
10224 tg3_flag_clear(tp, PAUSE_AUTONEG);
10226 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10227 u32 oldadv = phydev->advertising &
10228 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10229 if (oldadv != newadv) {
10230 phydev->advertising &=
10231 ~(ADVERTISED_Pause |
10232 ADVERTISED_Asym_Pause);
10233 phydev->advertising |= newadv;
10234 if (phydev->autoneg) {
10235 /*
10236 * Always renegotiate the link to
10237 * inform our link partner of our
10238 * flow control settings, even if the
10239 * flow control is forced. Let
10240 * tg3_adjust_link() do the final
10241 * flow control setup.
10242 */
10243 return phy_start_aneg(phydev);
10244 }
10245 }
10247 if (!epause->autoneg)
10248 tg3_setup_flow_control(tp, 0, 0);
10249 } else {
10250 tp->link_config.orig_advertising &=
10251 ~(ADVERTISED_Pause |
10252 ADVERTISED_Asym_Pause);
10253 tp->link_config.orig_advertising |= newadv;
10255 } else {
10256 int irq_sync = 0;
10258 if (netif_running(dev)) {
10259 tg3_netif_stop(tp);
10260 irq_sync = 1;
10263 tg3_full_lock(tp, irq_sync);
10265 if (epause->autoneg)
10266 tg3_flag_set(tp, PAUSE_AUTONEG);
10267 else
10268 tg3_flag_clear(tp, PAUSE_AUTONEG);
10269 if (epause->rx_pause)
10270 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10271 else
10272 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10273 if (epause->tx_pause)
10274 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10275 else
10276 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10278 if (netif_running(dev)) {
10279 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10280 err = tg3_restart_hw(tp, 1);
10281 if (!err)
10282 tg3_netif_start(tp);
10285 tg3_full_unlock(tp);
10288 return err;
10291 static int tg3_get_sset_count(struct net_device *dev, int sset)
10293 switch (sset) {
10294 case ETH_SS_TEST:
10295 return TG3_NUM_TEST;
10296 case ETH_SS_STATS:
10297 return TG3_NUM_STATS;
10298 default:
10299 return -EOPNOTSUPP;
10303 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10305 switch (stringset) {
10306 case ETH_SS_STATS:
10307 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10308 break;
10309 case ETH_SS_TEST:
10310 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10311 break;
10312 default:
10313 WARN_ON(1); /* we need a WARN() */
10314 break;
10318 static int tg3_set_phys_id(struct net_device *dev,
10319 enum ethtool_phys_id_state state)
10321 struct tg3 *tp = netdev_priv(dev);
10323 if (!netif_running(tp->dev))
10324 return -EAGAIN;
10326 switch (state) {
10327 case ETHTOOL_ID_ACTIVE:
10328 return 1; /* cycle on/off once per second */
10330 case ETHTOOL_ID_ON:
10331 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10332 LED_CTRL_1000MBPS_ON |
10333 LED_CTRL_100MBPS_ON |
10334 LED_CTRL_10MBPS_ON |
10335 LED_CTRL_TRAFFIC_OVERRIDE |
10336 LED_CTRL_TRAFFIC_BLINK |
10337 LED_CTRL_TRAFFIC_LED);
10338 break;
10340 case ETHTOOL_ID_OFF:
10341 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10342 LED_CTRL_TRAFFIC_OVERRIDE);
10343 break;
10345 case ETHTOOL_ID_INACTIVE:
10346 tw32(MAC_LED_CTRL, tp->led_ctrl);
10347 break;
10350 return 0;
10351 }
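/*
 * Note (on the ethtool core of this era, to the best of our reading):
 * returning 1 from the ETHTOOL_ID_ACTIVE case asks the core to drive
 * the blinking, calling back with ETHTOOL_ID_ON/ETHTOOL_ID_OFF at twice
 * that frequency, i.e. one on/off cycle per second here.
 */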
10353 static void tg3_get_ethtool_stats(struct net_device *dev,
10354 struct ethtool_stats *estats, u64 *tmp_stats)
10356 struct tg3 *tp = netdev_priv(dev);
10357 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10360 static __be32 * tg3_vpd_readblock(struct tg3 *tp)
10362 int i;
10363 __be32 *buf;
10364 u32 offset = 0, len = 0;
10365 u32 magic, val;
10367 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10368 return NULL;
10370 if (magic == TG3_EEPROM_MAGIC) {
10371 for (offset = TG3_NVM_DIR_START;
10372 offset < TG3_NVM_DIR_END;
10373 offset += TG3_NVM_DIRENT_SIZE) {
10374 if (tg3_nvram_read(tp, offset, &val))
10375 return NULL;
10377 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10378 TG3_NVM_DIRTYPE_EXTVPD)
10379 break;
10382 if (offset != TG3_NVM_DIR_END) {
10383 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10384 if (tg3_nvram_read(tp, offset + 4, &offset))
10385 return NULL;
10387 offset = tg3_nvram_logical_addr(tp, offset);
10391 if (!offset || !len) {
10392 offset = TG3_NVM_VPD_OFF;
10393 len = TG3_NVM_VPD_LEN;
10396 buf = kmalloc(len, GFP_KERNEL);
10397 if (buf == NULL)
10398 return NULL;
10400 if (magic == TG3_EEPROM_MAGIC) {
10401 for (i = 0; i < len; i += 4) {
10402 /* The data is in little-endian format in NVRAM.
10403 * Use the big-endian read routines to preserve
10404 * the byte order as it exists in NVRAM.
10405 */
10406 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10407 goto error;
10409 } else {
10410 u8 *ptr;
10411 ssize_t cnt;
10412 unsigned int pos = 0;
10414 ptr = (u8 *)&buf[0];
10415 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10416 cnt = pci_read_vpd(tp->pdev, pos,
10417 len - pos, ptr);
10418 if (cnt == -ETIMEDOUT || cnt == -EINTR)
10419 cnt = 0;
10420 else if (cnt < 0)
10421 goto error;
10423 if (pos != len)
10424 goto error;
10427 return buf;
10429 error:
10430 kfree(buf);
10431 return NULL;
10434 #define NVRAM_TEST_SIZE 0x100
10435 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10436 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10437 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10438 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10439 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10441 static int tg3_test_nvram(struct tg3 *tp)
10443 u32 csum, magic;
10444 __be32 *buf;
10445 int i, j, k, err = 0, size;
10447 if (tg3_flag(tp, NO_NVRAM))
10448 return 0;
10450 if (tg3_nvram_read(tp, 0, &magic) != 0)
10451 return -EIO;
10453 if (magic == TG3_EEPROM_MAGIC)
10454 size = NVRAM_TEST_SIZE;
10455 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10456 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10457 TG3_EEPROM_SB_FORMAT_1) {
10458 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10459 case TG3_EEPROM_SB_REVISION_0:
10460 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10461 break;
10462 case TG3_EEPROM_SB_REVISION_2:
10463 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10464 break;
10465 case TG3_EEPROM_SB_REVISION_3:
10466 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10467 break;
10468 default:
10469 return 0;
10471 } else
10472 return 0;
10473 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10474 size = NVRAM_SELFBOOT_HW_SIZE;
10475 else
10476 return -EIO;
10478 buf = kmalloc(size, GFP_KERNEL);
10479 if (buf == NULL)
10480 return -ENOMEM;
10482 err = -EIO;
10483 for (i = 0, j = 0; i < size; i += 4, j++) {
10484 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10485 if (err)
10486 break;
10488 if (i < size)
10489 goto out;
10491 /* Selfboot format */
10492 magic = be32_to_cpu(buf[0]);
10493 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10494 TG3_EEPROM_MAGIC_FW) {
10495 u8 *buf8 = (u8 *) buf, csum8 = 0;
10497 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10498 TG3_EEPROM_SB_REVISION_2) {
10499 /* For rev 2, the csum doesn't include the MBA. */
10500 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10501 csum8 += buf8[i];
10502 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10503 csum8 += buf8[i];
10504 } else {
10505 for (i = 0; i < size; i++)
10506 csum8 += buf8[i];
10509 if (csum8 == 0) {
10510 err = 0;
10511 goto out;
10514 err = -EIO;
10515 goto out;
10518 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10519 TG3_EEPROM_MAGIC_HW) {
10520 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10521 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10522 u8 *buf8 = (u8 *) buf;
10524 /* Separate the parity bits and the data bytes. */
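/*
 * Four of the 32 image bytes carry parity bits for the other 28 payload
 * bytes (7 + 7 + 6 + 8 bits, one per payload byte).  The loop below
 * unpacks them into parity[] while gathering the payload into data[];
 * the check further down then requires each payload byte plus its
 * parity bit to have odd overall parity (hweight8() counts set bits).
 */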
10525 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10526 if ((i == 0) || (i == 8)) {
10527 int l;
10528 u8 msk;
10530 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10531 parity[k++] = buf8[i] & msk;
10532 i++;
10533 } else if (i == 16) {
10534 int l;
10535 u8 msk;
10537 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10538 parity[k++] = buf8[i] & msk;
10539 i++;
10541 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10542 parity[k++] = buf8[i] & msk;
10543 i++;
10545 data[j++] = buf8[i];
10548 err = -EIO;
10549 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10550 u8 hw8 = hweight8(data[i]);
10552 if ((hw8 & 0x1) && parity[i])
10553 goto out;
10554 else if (!(hw8 & 0x1) && !parity[i])
10555 goto out;
10557 err = 0;
10558 goto out;
10561 err = -EIO;
10563 /* Bootstrap checksum at offset 0x10 */
10564 csum = calc_crc((unsigned char *) buf, 0x10);
10565 if (csum != le32_to_cpu(buf[0x10/4]))
10566 goto out;
10568 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10569 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10570 if (csum != le32_to_cpu(buf[0xfc/4]))
10571 goto out;
10573 kfree(buf);
10575 buf = tg3_vpd_readblock(tp);
10576 if (!buf)
10577 return -ENOMEM;
10579 i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
10580 PCI_VPD_LRDT_RO_DATA);
10581 if (i > 0) {
10582 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10583 if (j < 0)
10584 goto out;
10586 if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
10587 goto out;
10589 i += PCI_VPD_LRDT_TAG_SIZE;
10590 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10591 PCI_VPD_RO_KEYWORD_CHKSUM);
10592 if (j > 0) {
10593 u8 csum8 = 0;
10595 j += PCI_VPD_INFO_FLD_HDR_SIZE;
10597 for (i = 0; i <= j; i++)
10598 csum8 += ((u8 *)buf)[i];
10600 if (csum8)
10601 goto out;
10605 err = 0;
10607 out:
10608 kfree(buf);
10609 return err;
10610 }
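/*
 * Illustrative sketch of the VPD checksum rule applied above (the
 * helper and its names are hypothetical, not part of this driver):
 * per the PCI VPD format, every byte from the first VPD byte up to and
 * including the RV keyword's checksum byte must sum to zero mod 256.
 */
static bool example_vpd_csum_ok(const u8 *vpd, unsigned int csum_byte_off)
{
	u8 sum = 0;
	unsigned int i;

	for (i = 0; i <= csum_byte_off; i++)	/* include the checksum byte */
		sum += vpd[i];

	return sum == 0;	/* valid iff the 8-bit running sum wraps to zero */
}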
10612 #define TG3_SERDES_TIMEOUT_SEC 2
10613 #define TG3_COPPER_TIMEOUT_SEC 6
10615 static int tg3_test_link(struct tg3 *tp)
10617 int i, max;
10619 if (!netif_running(tp->dev))
10620 return -ENODEV;
10622 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10623 max = TG3_SERDES_TIMEOUT_SEC;
10624 else
10625 max = TG3_COPPER_TIMEOUT_SEC;
10627 for (i = 0; i < max; i++) {
10628 if (netif_carrier_ok(tp->dev))
10629 return 0;
10631 if (msleep_interruptible(1000))
10632 break;
10635 return -EIO;
10638 /* Only test the commonly used registers */
10639 static int tg3_test_registers(struct tg3 *tp)
10641 int i, is_5705, is_5750;
10642 u32 offset, read_mask, write_mask, val, save_val, read_val;
10643 static struct {
10644 u16 offset;
10645 u16 flags;
10646 #define TG3_FL_5705 0x1
10647 #define TG3_FL_NOT_5705 0x2
10648 #define TG3_FL_NOT_5788 0x4
10649 #define TG3_FL_NOT_5750 0x8
10650 u32 read_mask;
10651 u32 write_mask;
10652 } reg_tbl[] = {
10653 /* MAC Control Registers */
10654 { MAC_MODE, TG3_FL_NOT_5705,
10655 0x00000000, 0x00ef6f8c },
10656 { MAC_MODE, TG3_FL_5705,
10657 0x00000000, 0x01ef6b8c },
10658 { MAC_STATUS, TG3_FL_NOT_5705,
10659 0x03800107, 0x00000000 },
10660 { MAC_STATUS, TG3_FL_5705,
10661 0x03800100, 0x00000000 },
10662 { MAC_ADDR_0_HIGH, 0x0000,
10663 0x00000000, 0x0000ffff },
10664 { MAC_ADDR_0_LOW, 0x0000,
10665 0x00000000, 0xffffffff },
10666 { MAC_RX_MTU_SIZE, 0x0000,
10667 0x00000000, 0x0000ffff },
10668 { MAC_TX_MODE, 0x0000,
10669 0x00000000, 0x00000070 },
10670 { MAC_TX_LENGTHS, 0x0000,
10671 0x00000000, 0x00003fff },
10672 { MAC_RX_MODE, TG3_FL_NOT_5705,
10673 0x00000000, 0x000007fc },
10674 { MAC_RX_MODE, TG3_FL_5705,
10675 0x00000000, 0x000007dc },
10676 { MAC_HASH_REG_0, 0x0000,
10677 0x00000000, 0xffffffff },
10678 { MAC_HASH_REG_1, 0x0000,
10679 0x00000000, 0xffffffff },
10680 { MAC_HASH_REG_2, 0x0000,
10681 0x00000000, 0xffffffff },
10682 { MAC_HASH_REG_3, 0x0000,
10683 0x00000000, 0xffffffff },
10685 /* Receive Data and Receive BD Initiator Control Registers. */
10686 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10687 0x00000000, 0xffffffff },
10688 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10689 0x00000000, 0xffffffff },
10690 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10691 0x00000000, 0x00000003 },
10692 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10693 0x00000000, 0xffffffff },
10694 { RCVDBDI_STD_BD+0, 0x0000,
10695 0x00000000, 0xffffffff },
10696 { RCVDBDI_STD_BD+4, 0x0000,
10697 0x00000000, 0xffffffff },
10698 { RCVDBDI_STD_BD+8, 0x0000,
10699 0x00000000, 0xffff0002 },
10700 { RCVDBDI_STD_BD+0xc, 0x0000,
10701 0x00000000, 0xffffffff },
10703 /* Receive BD Initiator Control Registers. */
10704 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10705 0x00000000, 0xffffffff },
10706 { RCVBDI_STD_THRESH, TG3_FL_5705,
10707 0x00000000, 0x000003ff },
10708 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10709 0x00000000, 0xffffffff },
10711 /* Host Coalescing Control Registers. */
10712 { HOSTCC_MODE, TG3_FL_NOT_5705,
10713 0x00000000, 0x00000004 },
10714 { HOSTCC_MODE, TG3_FL_5705,
10715 0x00000000, 0x000000f6 },
10716 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10717 0x00000000, 0xffffffff },
10718 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10719 0x00000000, 0x000003ff },
10720 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10721 0x00000000, 0xffffffff },
10722 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10723 0x00000000, 0x000003ff },
10724 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10725 0x00000000, 0xffffffff },
10726 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10727 0x00000000, 0x000000ff },
10728 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10729 0x00000000, 0xffffffff },
10730 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10731 0x00000000, 0x000000ff },
10732 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10733 0x00000000, 0xffffffff },
10734 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10735 0x00000000, 0xffffffff },
10736 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10737 0x00000000, 0xffffffff },
10738 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10739 0x00000000, 0x000000ff },
10740 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10741 0x00000000, 0xffffffff },
10742 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10743 0x00000000, 0x000000ff },
10744 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10745 0x00000000, 0xffffffff },
10746 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10747 0x00000000, 0xffffffff },
10748 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10749 0x00000000, 0xffffffff },
10750 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10751 0x00000000, 0xffffffff },
10752 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10753 0x00000000, 0xffffffff },
10754 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10755 0xffffffff, 0x00000000 },
10756 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10757 0xffffffff, 0x00000000 },
10759 /* Buffer Manager Control Registers. */
10760 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10761 0x00000000, 0x007fff80 },
10762 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10763 0x00000000, 0x007fffff },
10764 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10765 0x00000000, 0x0000003f },
10766 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10767 0x00000000, 0x000001ff },
10768 { BUFMGR_MB_HIGH_WATER, 0x0000,
10769 0x00000000, 0x000001ff },
10770 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10771 0xffffffff, 0x00000000 },
10772 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10773 0xffffffff, 0x00000000 },
10775 /* Mailbox Registers */
10776 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10777 0x00000000, 0x000001ff },
10778 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10779 0x00000000, 0x000001ff },
10780 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10781 0x00000000, 0x000007ff },
10782 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10783 0x00000000, 0x000001ff },
10785 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10786 };
10788 is_5705 = is_5750 = 0;
10789 if (tg3_flag(tp, 5705_PLUS)) {
10790 is_5705 = 1;
10791 if (tg3_flag(tp, 5750_PLUS))
10792 is_5750 = 1;
10795 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10796 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10797 continue;
10799 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10800 continue;
10802 if (tg3_flag(tp, IS_5788) &&
10803 (reg_tbl[i].flags & TG3_FL_NOT_5788))
10804 continue;
10806 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10807 continue;
10809 offset = (u32) reg_tbl[i].offset;
10810 read_mask = reg_tbl[i].read_mask;
10811 write_mask = reg_tbl[i].write_mask;
10813 /* Save the original register content */
10814 save_val = tr32(offset);
10816 /* Determine the read-only value. */
10817 read_val = save_val & read_mask;
10819 /* Write zero to the register, then make sure the read-only bits
10820 * are not changed and the read/write bits are all zeros.
10821 */
10822 tw32(offset, 0);
10824 val = tr32(offset);
10826 /* Test the read-only and read/write bits. */
10827 if (((val & read_mask) != read_val) || (val & write_mask))
10828 goto out;
10830 /* Write ones to all the bits defined by RdMask and WrMask, then
10831 * make sure the read-only bits are not changed and the
10832 * read/write bits are all ones.
10833 */
10834 tw32(offset, read_mask | write_mask);
10836 val = tr32(offset);
10838 /* Test the read-only bits. */
10839 if ((val & read_mask) != read_val)
10840 goto out;
10842 /* Test the read/write bits. */
10843 if ((val & write_mask) != write_mask)
10844 goto out;
10846 tw32(offset, save_val);
10849 return 0;
10851 out:
10852 if (netif_msg_hw(tp))
10853 netdev_err(tp->dev,
10854 "Register test failed at offset %x\n", offset);
10855 tw32(offset, save_val);
10856 return -EIO;
10857 }
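/*
 * Illustrative sketch distilling the probe above (hypothetical helper,
 * not part of this driver): write all-zeros, then all testable ones,
 * checking that read-only bits never move and that read/write bits take
 * exactly the written value, restoring the register in every case.
 */
static int example_reg_mask_test(u32 (*rd)(u32 off), void (*wr)(u32 off, u32 v),
				 u32 off, u32 ro_mask, u32 rw_mask)
{
	u32 save = rd(off);
	u32 ro = save & ro_mask;	/* expected read-only value */
	u32 val;
	int err = -EIO;

	wr(off, 0);
	val = rd(off);
	if (((val & ro_mask) != ro) || (val & rw_mask))
		goto done;		/* a bit moved that should not have */

	wr(off, ro_mask | rw_mask);
	val = rd(off);
	if (((val & ro_mask) == ro) && ((val & rw_mask) == rw_mask))
		err = 0;
done:
	wr(off, save);			/* always restore the original value */
	return err;
}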
10859 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10861 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10862 int i;
10863 u32 j;
10865 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10866 for (j = 0; j < len; j += 4) {
10867 u32 val;
10869 tg3_write_mem(tp, offset + j, test_pattern[i]);
10870 tg3_read_mem(tp, offset + j, &val);
10871 if (val != test_pattern[i])
10872 return -EIO;
10875 return 0;
10876 }
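/*
 * The three patterns (all-zeros, all-ones, and the alternating
 * 0xaa55a55a) are a common minimal set for catching stuck-at bits and
 * simple adjacent-bit coupling faults.
 */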
10878 static int tg3_test_memory(struct tg3 *tp)
10880 static struct mem_entry {
10881 u32 offset;
10882 u32 len;
10883 } mem_tbl_570x[] = {
10884 { 0x00000000, 0x00b50},
10885 { 0x00002000, 0x1c000},
10886 { 0xffffffff, 0x00000}
10887 }, mem_tbl_5705[] = {
10888 { 0x00000100, 0x0000c},
10889 { 0x00000200, 0x00008},
10890 { 0x00004000, 0x00800},
10891 { 0x00006000, 0x01000},
10892 { 0x00008000, 0x02000},
10893 { 0x00010000, 0x0e000},
10894 { 0xffffffff, 0x00000}
10895 }, mem_tbl_5755[] = {
10896 { 0x00000200, 0x00008},
10897 { 0x00004000, 0x00800},
10898 { 0x00006000, 0x00800},
10899 { 0x00008000, 0x02000},
10900 { 0x00010000, 0x0c000},
10901 { 0xffffffff, 0x00000}
10902 }, mem_tbl_5906[] = {
10903 { 0x00000200, 0x00008},
10904 { 0x00004000, 0x00400},
10905 { 0x00006000, 0x00400},
10906 { 0x00008000, 0x01000},
10907 { 0x00010000, 0x01000},
10908 { 0xffffffff, 0x00000}
10909 }, mem_tbl_5717[] = {
10910 { 0x00000200, 0x00008},
10911 { 0x00010000, 0x0a000},
10912 { 0x00020000, 0x13c00},
10913 { 0xffffffff, 0x00000}
10914 }, mem_tbl_57765[] = {
10915 { 0x00000200, 0x00008},
10916 { 0x00004000, 0x00800},
10917 { 0x00006000, 0x09800},
10918 { 0x00010000, 0x0a000},
10919 { 0xffffffff, 0x00000}
10920 };
10921 struct mem_entry *mem_tbl;
10922 int err = 0;
10923 int i;
10925 if (tg3_flag(tp, 5717_PLUS))
10926 mem_tbl = mem_tbl_5717;
10927 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10928 mem_tbl = mem_tbl_57765;
10929 else if (tg3_flag(tp, 5755_PLUS))
10930 mem_tbl = mem_tbl_5755;
10931 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10932 mem_tbl = mem_tbl_5906;
10933 else if (tg3_flag(tp, 5705_PLUS))
10934 mem_tbl = mem_tbl_5705;
10935 else
10936 mem_tbl = mem_tbl_570x;
10938 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10939 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
10940 if (err)
10941 break;
10944 return err;
10947 #define TG3_MAC_LOOPBACK 0
10948 #define TG3_PHY_LOOPBACK 1
10949 #define TG3_TSO_LOOPBACK 2
10951 #define TG3_TSO_MSS 500
10953 #define TG3_TSO_IP_HDR_LEN 20
10954 #define TG3_TSO_TCP_HDR_LEN 20
10955 #define TG3_TSO_TCP_OPT_LEN 12
10957 static const u8 tg3_tso_header[] = {
10958 0x08, 0x00,
10959 0x45, 0x00, 0x00, 0x00,
10960 0x00, 0x00, 0x40, 0x00,
10961 0x40, 0x06, 0x00, 0x00,
10962 0x0a, 0x00, 0x00, 0x01,
10963 0x0a, 0x00, 0x00, 0x02,
10964 0x0d, 0x00, 0xe0, 0x00,
10965 0x00, 0x00, 0x01, 0x00,
10966 0x00, 0x00, 0x02, 0x00,
10967 0x80, 0x10, 0x10, 0x00,
10968 0x14, 0x09, 0x00, 0x00,
10969 0x01, 0x01, 0x08, 0x0a,
10970 0x11, 0x11, 0x11, 0x11,
10971 0x11, 0x11, 0x11, 0x11,
10972 };
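/*
 * Field-by-field reading of the template above: bytes 0-1 are the IPv4
 * ethertype (0x0800) completing the 14-byte MAC header; then an IPv4
 * header (version 4/IHL 5, DF set, TTL 0x40, protocol 6 = TCP,
 * saddr 10.0.0.1, daddr 10.0.0.2, with tot_len and checksums left zero
 * to be filled in later); then a TCP header with a 32-byte data offset
 * (20 bytes plus the 12 option bytes: NOP, NOP, timestamp) and ACK set.
 */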
10974 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
10976 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10977 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
10978 struct sk_buff *skb, *rx_skb;
10979 u8 *tx_data;
10980 dma_addr_t map;
10981 int num_pkts, tx_len, rx_len, i, err;
10982 struct tg3_rx_buffer_desc *desc;
10983 struct tg3_napi *tnapi, *rnapi;
10984 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
10986 tnapi = &tp->napi[0];
10987 rnapi = &tp->napi[0];
10988 if (tp->irq_cnt > 1) {
10989 if (tg3_flag(tp, ENABLE_RSS))
10990 rnapi = &tp->napi[1];
10991 if (tg3_flag(tp, ENABLE_TSS))
10992 tnapi = &tp->napi[1];
10994 coal_now = tnapi->coal_now | rnapi->coal_now;
10996 if (loopback_mode == TG3_MAC_LOOPBACK) {
10997 /* HW errata - mac loopback fails in some cases on 5780.
10998 * Normal traffic and PHY loopback are not affected by
10999 * errata. Also, the MAC loopback test is deprecated for
11000 * all newer ASIC revisions.
11001 */
11002 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11003 tg3_flag(tp, CPMU_PRESENT))
11004 return 0;
11006 mac_mode = tp->mac_mode &
11007 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11008 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11009 if (!tg3_flag(tp, 5705_PLUS))
11010 mac_mode |= MAC_MODE_LINK_POLARITY;
11011 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11012 mac_mode |= MAC_MODE_PORT_MODE_MII;
11013 else
11014 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11015 tw32(MAC_MODE, mac_mode);
11016 } else {
11017 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11018 tg3_phy_fet_toggle_apd(tp, false);
11019 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11020 } else
11021 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
11023 tg3_phy_toggle_automdix(tp, 0);
11025 tg3_writephy(tp, MII_BMCR, val);
11026 udelay(40);
11028 mac_mode = tp->mac_mode &
11029 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11030 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11031 tg3_writephy(tp, MII_TG3_FET_PTEST,
11032 MII_TG3_FET_PTEST_FRC_TX_LINK |
11033 MII_TG3_FET_PTEST_FRC_TX_LOCK);
11034 /* The write needs to be flushed for the AC131 */
11035 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11036 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11037 mac_mode |= MAC_MODE_PORT_MODE_MII;
11038 } else
11039 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11041 /* reset to prevent losing 1st rx packet intermittently */
11042 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11043 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11044 udelay(10);
11045 tw32_f(MAC_RX_MODE, tp->rx_mode);
11047 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11048 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11049 if (masked_phy_id == TG3_PHY_ID_BCM5401)
11050 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11051 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11052 mac_mode |= MAC_MODE_LINK_POLARITY;
11053 tg3_writephy(tp, MII_TG3_EXT_CTRL,
11054 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11056 tw32(MAC_MODE, mac_mode);
11058 /* Wait for link */
11059 for (i = 0; i < 100; i++) {
11060 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11061 break;
11062 mdelay(1);
11066 err = -EIO;
11068 tx_len = pktsz;
11069 skb = netdev_alloc_skb(tp->dev, tx_len);
11070 if (!skb)
11071 return -ENOMEM;
11073 tx_data = skb_put(skb, tx_len);
11074 memcpy(tx_data, tp->dev->dev_addr, 6);
11075 memset(tx_data + 6, 0x0, 8);
11077 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11079 if (loopback_mode == TG3_TSO_LOOPBACK) {
11080 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11082 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11083 TG3_TSO_TCP_OPT_LEN;
11085 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11086 sizeof(tg3_tso_header));
11087 mss = TG3_TSO_MSS;
11089 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11090 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11092 /* Set the total length field in the IP header */
11093 iph->tot_len = htons((u16)(mss + hdr_len));
11095 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11096 TXD_FLAG_CPU_POST_DMA);
11098 if (tg3_flag(tp, HW_TSO_1) ||
11099 tg3_flag(tp, HW_TSO_2) ||
11100 tg3_flag(tp, HW_TSO_3)) {
11101 struct tcphdr *th;
11102 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11103 th = (struct tcphdr *)&tx_data[val];
11104 th->check = 0;
11105 } else
11106 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11108 if (tg3_flag(tp, HW_TSO_3)) {
11109 mss |= (hdr_len & 0xc) << 12;
11110 if (hdr_len & 0x10)
11111 base_flags |= 0x00000010;
11112 base_flags |= (hdr_len & 0x3e0) << 5;
11113 } else if (tg3_flag(tp, HW_TSO_2))
11114 mss |= hdr_len << 9;
11115 else if (tg3_flag(tp, HW_TSO_1) ||
11116 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11117 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11118 } else {
11119 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11122 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11123 } else {
11124 num_pkts = 1;
11125 data_off = ETH_HLEN;
11128 for (i = data_off; i < tx_len; i++)
11129 tx_data[i] = (u8) (i & 0xff);
11131 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11132 if (pci_dma_mapping_error(tp->pdev, map)) {
11133 dev_kfree_skb(skb);
11134 return -EIO;
11137 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11138 rnapi->coal_now);
11140 udelay(10);
11142 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11144 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
11145 base_flags, (mss << 1) | 1);
11147 tnapi->tx_prod++;
11149 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11150 tr32_mailbox(tnapi->prodmbox);
11152 udelay(10);
11154 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11155 for (i = 0; i < 35; i++) {
11156 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11157 coal_now);
11159 udelay(10);
11161 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11162 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11163 if ((tx_idx == tnapi->tx_prod) &&
11164 (rx_idx == (rx_start_idx + num_pkts)))
11165 break;
11168 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
11169 dev_kfree_skb(skb);
11171 if (tx_idx != tnapi->tx_prod)
11172 goto out;
11174 if (rx_idx != rx_start_idx + num_pkts)
11175 goto out;
11177 val = data_off;
11178 while (rx_idx != rx_start_idx) {
11179 desc = &rnapi->rx_rcb[rx_start_idx++];
11180 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11181 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11183 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11184 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11185 goto out;
11187 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11188 - ETH_FCS_LEN;
11190 if (loopback_mode != TG3_TSO_LOOPBACK) {
11191 if (rx_len != tx_len)
11192 goto out;
11194 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11195 if (opaque_key != RXD_OPAQUE_RING_STD)
11196 goto out;
11197 } else {
11198 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11199 goto out;
11201 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11202 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11203 >> RXD_TCPCSUM_SHIFT != 0xffff) {
11204 goto out;
11207 if (opaque_key == RXD_OPAQUE_RING_STD) {
11208 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11209 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11210 mapping);
11211 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11212 rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11213 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11214 mapping);
11215 } else
11216 goto out;
11218 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11219 PCI_DMA_FROMDEVICE);
11221 for (i = data_off; i < rx_len; i++, val++) {
11222 if (*(rx_skb->data + i) != (u8) (val & 0xff))
11223 goto out;
11227 err = 0;
11229 /* tg3_free_rings will unmap and free the rx_skb */
11230 out:
11231 return err;
11234 #define TG3_STD_LOOPBACK_FAILED 1
11235 #define TG3_JMB_LOOPBACK_FAILED 2
11236 #define TG3_TSO_LOOPBACK_FAILED 4
11238 #define TG3_MAC_LOOPBACK_SHIFT 0
11239 #define TG3_PHY_LOOPBACK_SHIFT 4
11240 #define TG3_LOOPBACK_FAILED 0x00000077
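/*
 * Loopback results are packed as a 3-bit failure mask per mode
 * (STD = 1, JMB = 2, TSO = 4), MAC tests in bits 3:0 and PHY tests in
 * bits 7:4, so "everything failed" is 0x77.
 */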
11242 static int tg3_test_loopback(struct tg3 *tp)
11244 int err = 0;
11245 u32 eee_cap, cpmuctrl = 0;
11247 if (!netif_running(tp->dev))
11248 return TG3_LOOPBACK_FAILED;
11250 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11251 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11253 err = tg3_reset_hw(tp, 1);
11254 if (err) {
11255 err = TG3_LOOPBACK_FAILED;
11256 goto done;
11259 if (tg3_flag(tp, ENABLE_RSS)) {
11260 int i;
11262 /* Reroute all rx packets to the 1st queue */
11263 for (i = MAC_RSS_INDIR_TBL_0;
11264 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11265 tw32(i, 0x0);
11268 /* Turn off gphy autopowerdown. */
11269 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11270 tg3_phy_toggle_apd(tp, false);
11272 if (tg3_flag(tp, CPMU_PRESENT)) {
11273 int i;
11274 u32 status;
11276 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11278 /* Wait for up to 40 microseconds to acquire lock. */
11279 for (i = 0; i < 4; i++) {
11280 status = tr32(TG3_CPMU_MUTEX_GNT);
11281 if (status == CPMU_MUTEX_GNT_DRIVER)
11282 break;
11283 udelay(10);
11286 if (status != CPMU_MUTEX_GNT_DRIVER) {
11287 err = TG3_LOOPBACK_FAILED;
11288 goto done;
11291 /* Turn off link-based power management. */
11292 cpmuctrl = tr32(TG3_CPMU_CTRL);
11293 tw32(TG3_CPMU_CTRL,
11294 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11295 CPMU_CTRL_LINK_AWARE_MODE));
11298 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11299 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11301 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11302 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11303 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11305 if (tg3_flag(tp, CPMU_PRESENT)) {
11306 tw32(TG3_CPMU_CTRL, cpmuctrl);
11308 /* Release the mutex */
11309 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11312 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11313 !tg3_flag(tp, USE_PHYLIB)) {
11314 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11315 err |= TG3_STD_LOOPBACK_FAILED <<
11316 TG3_PHY_LOOPBACK_SHIFT;
11317 if (tg3_flag(tp, TSO_CAPABLE) &&
11318 tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11319 err |= TG3_TSO_LOOPBACK_FAILED <<
11320 TG3_PHY_LOOPBACK_SHIFT;
11321 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11322 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11323 err |= TG3_JMB_LOOPBACK_FAILED <<
11324 TG3_PHY_LOOPBACK_SHIFT;
11327 /* Re-enable gphy autopowerdown. */
11328 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11329 tg3_phy_toggle_apd(tp, true);
11331 done:
11332 tp->phy_flags |= eee_cap;
11334 return err;
11337 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11338 u64 *data)
11340 struct tg3 *tp = netdev_priv(dev);
11342 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11343 tg3_power_up(tp);
11345 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11347 if (tg3_test_nvram(tp) != 0) {
11348 etest->flags |= ETH_TEST_FL_FAILED;
11349 data[0] = 1;
11351 if (tg3_test_link(tp) != 0) {
11352 etest->flags |= ETH_TEST_FL_FAILED;
11353 data[1] = 1;
11355 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11356 int err, err2 = 0, irq_sync = 0;
11358 if (netif_running(dev)) {
11359 tg3_phy_stop(tp);
11360 tg3_netif_stop(tp);
11361 irq_sync = 1;
11364 tg3_full_lock(tp, irq_sync);
11366 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11367 err = tg3_nvram_lock(tp);
11368 tg3_halt_cpu(tp, RX_CPU_BASE);
11369 if (!tg3_flag(tp, 5705_PLUS))
11370 tg3_halt_cpu(tp, TX_CPU_BASE);
11371 if (!err)
11372 tg3_nvram_unlock(tp);
11374 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11375 tg3_phy_reset(tp);
11377 if (tg3_test_registers(tp) != 0) {
11378 etest->flags |= ETH_TEST_FL_FAILED;
11379 data[2] = 1;
11381 if (tg3_test_memory(tp) != 0) {
11382 etest->flags |= ETH_TEST_FL_FAILED;
11383 data[3] = 1;
11385 if ((data[4] = tg3_test_loopback(tp)) != 0)
11386 etest->flags |= ETH_TEST_FL_FAILED;
11388 tg3_full_unlock(tp);
11390 if (tg3_test_interrupt(tp) != 0) {
11391 etest->flags |= ETH_TEST_FL_FAILED;
11392 data[5] = 1;
11395 tg3_full_lock(tp, 0);
11397 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11398 if (netif_running(dev)) {
11399 tg3_flag_set(tp, INIT_COMPLETE);
11400 err2 = tg3_restart_hw(tp, 1);
11401 if (!err2)
11402 tg3_netif_start(tp);
11405 tg3_full_unlock(tp);
11407 if (irq_sync && !err2)
11408 tg3_phy_start(tp);
11410 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11411 tg3_power_down(tp);
11415 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11417 struct mii_ioctl_data *data = if_mii(ifr);
11418 struct tg3 *tp = netdev_priv(dev);
11419 int err;
11421 if (tg3_flag(tp, USE_PHYLIB)) {
11422 struct phy_device *phydev;
11423 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11424 return -EAGAIN;
11425 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11426 return phy_mii_ioctl(phydev, ifr, cmd);
11429 switch (cmd) {
11430 case SIOCGMIIPHY:
11431 data->phy_id = tp->phy_addr;
11433 /* fallthru */
11434 case SIOCGMIIREG: {
11435 u32 mii_regval;
11437 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11438 break; /* We have no PHY */
11440 if (!netif_running(dev))
11441 return -EAGAIN;
11443 spin_lock_bh(&tp->lock);
11444 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11445 spin_unlock_bh(&tp->lock);
11447 data->val_out = mii_regval;
11449 return err;
11452 case SIOCSMIIREG:
11453 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11454 break; /* We have no PHY */
11456 if (!netif_running(dev))
11457 return -EAGAIN;
11459 spin_lock_bh(&tp->lock);
11460 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11461 spin_unlock_bh(&tp->lock);
11463 return err;
11465 default:
11466 /* do nothing */
11467 break;
11469 return -EOPNOTSUPP;
11472 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11474 struct tg3 *tp = netdev_priv(dev);
11476 memcpy(ec, &tp->coal, sizeof(*ec));
11477 return 0;
11480 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11482 struct tg3 *tp = netdev_priv(dev);
11483 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11484 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11486 if (!tg3_flag(tp, 5705_PLUS)) {
11487 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11488 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11489 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11490 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11493 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11494 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11495 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11496 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11497 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11498 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11499 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11500 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11501 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11502 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11503 return -EINVAL;
11505 /* No rx interrupts will be generated if both are zero */
11506 if ((ec->rx_coalesce_usecs == 0) &&
11507 (ec->rx_max_coalesced_frames == 0))
11508 return -EINVAL;
11510 /* No tx interrupts will be generated if both are zero */
11511 if ((ec->tx_coalesce_usecs == 0) &&
11512 (ec->tx_max_coalesced_frames == 0))
11513 return -EINVAL;
11515 /* Only copy relevant parameters, ignore all others. */
11516 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11517 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11518 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11519 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11520 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11521 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11522 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11523 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11524 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11526 if (netif_running(dev)) {
11527 tg3_full_lock(tp, 0);
11528 __tg3_set_coalesce(tp, &tp->coal);
11529 tg3_full_unlock(tp);
11531 return 0;
11534 static const struct ethtool_ops tg3_ethtool_ops = {
11535 .get_settings = tg3_get_settings,
11536 .set_settings = tg3_set_settings,
11537 .get_drvinfo = tg3_get_drvinfo,
11538 .get_regs_len = tg3_get_regs_len,
11539 .get_regs = tg3_get_regs,
11540 .get_wol = tg3_get_wol,
11541 .set_wol = tg3_set_wol,
11542 .get_msglevel = tg3_get_msglevel,
11543 .set_msglevel = tg3_set_msglevel,
11544 .nway_reset = tg3_nway_reset,
11545 .get_link = ethtool_op_get_link,
11546 .get_eeprom_len = tg3_get_eeprom_len,
11547 .get_eeprom = tg3_get_eeprom,
11548 .set_eeprom = tg3_set_eeprom,
11549 .get_ringparam = tg3_get_ringparam,
11550 .set_ringparam = tg3_set_ringparam,
11551 .get_pauseparam = tg3_get_pauseparam,
11552 .set_pauseparam = tg3_set_pauseparam,
11553 .self_test = tg3_self_test,
11554 .get_strings = tg3_get_strings,
11555 .set_phys_id = tg3_set_phys_id,
11556 .get_ethtool_stats = tg3_get_ethtool_stats,
11557 .get_coalesce = tg3_get_coalesce,
11558 .set_coalesce = tg3_set_coalesce,
11559 .get_sset_count = tg3_get_sset_count,
11560 };
11562 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11564 u32 cursize, val, magic;
11566 tp->nvram_size = EEPROM_CHIP_SIZE;
11568 if (tg3_nvram_read(tp, 0, &magic) != 0)
11569 return;
11571 if ((magic != TG3_EEPROM_MAGIC) &&
11572 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11573 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11574 return;
11576 /*
11577 * Size the chip by reading offsets at increasing powers of two.
11578 * When we encounter our validation signature, we know the addressing
11579 * has wrapped around, and thus have our chip size.
11580 */
11581 cursize = 0x10;
11583 while (cursize < tp->nvram_size) {
11584 if (tg3_nvram_read(tp, cursize, &val) != 0)
11585 return;
11587 if (val == magic)
11588 break;
11590 cursize <<= 1;
11593 tp->nvram_size = cursize;
11594 }
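/*
 * Illustrative sketch of the sizing loop above in isolation (the
 * accessor and helper names are hypothetical, not part of this driver):
 * keep doubling the probe offset until the magic value stored at offset
 * 0 reads back, at which point the addressing has wrapped and the probe
 * offset is the chip size.
 */
static u32 example_size_by_wraparound(u32 (*read32)(u32 off), u32 max_size)
{
	u32 magic = read32(0);		/* validation signature at offset 0 */
	u32 cursize = 0x10;

	while (cursize < max_size) {
		if (read32(cursize) == magic)
			break;		/* wrapped back onto offset 0 */
		cursize <<= 1;
	}

	return cursize;
}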
11596 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11598 u32 val;
11600 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11601 return;
11603 /* Selfboot format */
11604 if (val != TG3_EEPROM_MAGIC) {
11605 tg3_get_eeprom_size(tp);
11606 return;
11609 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11610 if (val != 0) {
11611 /* This is confusing. We want to operate on the
11612 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11613 * call will read from NVRAM and byteswap the data
11614 * according to the byteswapping settings for all
11615 * other register accesses. This ensures the data we
11616 * want will always reside in the lower 16-bits.
11617 * However, the data in NVRAM is in LE format, which
11618 * means the data from the NVRAM read will always be
11619 * opposite the endianness of the CPU. The 16-bit
11620 * byteswap then brings the data to CPU endianness.
11621 */
11622 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11623 return;
11626 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11627 }
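/*
 * Worked example of the swab16() sizing above: a 512 KB part stores the
 * kilobyte count 512 (0x0200) little-endian, so if the read leaves
 * 0x0002 in the low 16 bits, swab16() restores 0x0200 and the size
 * computes to 512 * 1024 bytes.
 */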
11629 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11631 u32 nvcfg1;
11633 nvcfg1 = tr32(NVRAM_CFG1);
11634 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11635 tg3_flag_set(tp, FLASH);
11636 } else {
11637 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11638 tw32(NVRAM_CFG1, nvcfg1);
11641 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11642 tg3_flag(tp, 5780_CLASS)) {
11643 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11644 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11645 tp->nvram_jedecnum = JEDEC_ATMEL;
11646 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11647 tg3_flag_set(tp, NVRAM_BUFFERED);
11648 break;
11649 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11650 tp->nvram_jedecnum = JEDEC_ATMEL;
11651 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11652 break;
11653 case FLASH_VENDOR_ATMEL_EEPROM:
11654 tp->nvram_jedecnum = JEDEC_ATMEL;
11655 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11656 tg3_flag_set(tp, NVRAM_BUFFERED);
11657 break;
11658 case FLASH_VENDOR_ST:
11659 tp->nvram_jedecnum = JEDEC_ST;
11660 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11661 tg3_flag_set(tp, NVRAM_BUFFERED);
11662 break;
11663 case FLASH_VENDOR_SAIFUN:
11664 tp->nvram_jedecnum = JEDEC_SAIFUN;
11665 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11666 break;
11667 case FLASH_VENDOR_SST_SMALL:
11668 case FLASH_VENDOR_SST_LARGE:
11669 tp->nvram_jedecnum = JEDEC_SST;
11670 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11671 break;
11673 } else {
11674 tp->nvram_jedecnum = JEDEC_ATMEL;
11675 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11676 tg3_flag_set(tp, NVRAM_BUFFERED);
11680 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11682 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11683 case FLASH_5752PAGE_SIZE_256:
11684 tp->nvram_pagesize = 256;
11685 break;
11686 case FLASH_5752PAGE_SIZE_512:
11687 tp->nvram_pagesize = 512;
11688 break;
11689 case FLASH_5752PAGE_SIZE_1K:
11690 tp->nvram_pagesize = 1024;
11691 break;
11692 case FLASH_5752PAGE_SIZE_2K:
11693 tp->nvram_pagesize = 2048;
11694 break;
11695 case FLASH_5752PAGE_SIZE_4K:
11696 tp->nvram_pagesize = 4096;
11697 break;
11698 case FLASH_5752PAGE_SIZE_264:
11699 tp->nvram_pagesize = 264;
11700 break;
11701 case FLASH_5752PAGE_SIZE_528:
11702 tp->nvram_pagesize = 528;
11703 break;
11707 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11709 u32 nvcfg1;
11711 nvcfg1 = tr32(NVRAM_CFG1);
11713 /* NVRAM protection for TPM */
11714 if (nvcfg1 & (1 << 27))
11715 tg3_flag_set(tp, PROTECTED_NVRAM);
11717 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11718 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11719 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11720 tp->nvram_jedecnum = JEDEC_ATMEL;
11721 tg3_flag_set(tp, NVRAM_BUFFERED);
11722 break;
11723 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11724 tp->nvram_jedecnum = JEDEC_ATMEL;
11725 tg3_flag_set(tp, NVRAM_BUFFERED);
11726 tg3_flag_set(tp, FLASH);
11727 break;
11728 case FLASH_5752VENDOR_ST_M45PE10:
11729 case FLASH_5752VENDOR_ST_M45PE20:
11730 case FLASH_5752VENDOR_ST_M45PE40:
11731 tp->nvram_jedecnum = JEDEC_ST;
11732 tg3_flag_set(tp, NVRAM_BUFFERED);
11733 tg3_flag_set(tp, FLASH);
11734 break;
11737 if (tg3_flag(tp, FLASH)) {
11738 tg3_nvram_get_pagesize(tp, nvcfg1);
11739 } else {
11740 /* For eeprom, set pagesize to maximum eeprom size */
11741 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11743 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11744 tw32(NVRAM_CFG1, nvcfg1);
11748 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11750 u32 nvcfg1, protect = 0;
11752 nvcfg1 = tr32(NVRAM_CFG1);
11754 /* NVRAM protection for TPM */
11755 if (nvcfg1 & (1 << 27)) {
11756 tg3_flag_set(tp, PROTECTED_NVRAM);
11757 protect = 1;
11760 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11761 switch (nvcfg1) {
11762 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11763 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11764 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11765 case FLASH_5755VENDOR_ATMEL_FLASH_5:
11766 tp->nvram_jedecnum = JEDEC_ATMEL;
11767 tg3_flag_set(tp, NVRAM_BUFFERED);
11768 tg3_flag_set(tp, FLASH);
11769 tp->nvram_pagesize = 264;
11770 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11771 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11772 tp->nvram_size = (protect ? 0x3e200 :
11773 TG3_NVRAM_SIZE_512KB);
11774 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11775 tp->nvram_size = (protect ? 0x1f200 :
11776 TG3_NVRAM_SIZE_256KB);
11777 else
11778 tp->nvram_size = (protect ? 0x1f200 :
11779 TG3_NVRAM_SIZE_128KB);
11780 break;
11781 case FLASH_5752VENDOR_ST_M45PE10:
11782 case FLASH_5752VENDOR_ST_M45PE20:
11783 case FLASH_5752VENDOR_ST_M45PE40:
11784 tp->nvram_jedecnum = JEDEC_ST;
11785 tg3_flag_set(tp, NVRAM_BUFFERED);
11786 tg3_flag_set(tp, FLASH);
11787 tp->nvram_pagesize = 256;
11788 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11789 tp->nvram_size = (protect ?
11790 TG3_NVRAM_SIZE_64KB :
11791 TG3_NVRAM_SIZE_128KB);
11792 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11793 tp->nvram_size = (protect ?
11794 TG3_NVRAM_SIZE_64KB :
11795 TG3_NVRAM_SIZE_256KB);
11796 else
11797 tp->nvram_size = (protect ?
11798 TG3_NVRAM_SIZE_128KB :
11799 TG3_NVRAM_SIZE_512KB);
11800 break;
11804 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11806 u32 nvcfg1;
11808 nvcfg1 = tr32(NVRAM_CFG1);
11810 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11811 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11812 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11813 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11814 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11815 tp->nvram_jedecnum = JEDEC_ATMEL;
11816 tg3_flag_set(tp, NVRAM_BUFFERED);
11817 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11819 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11820 tw32(NVRAM_CFG1, nvcfg1);
11821 break;
11822 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11823 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11824 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11825 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11826 tp->nvram_jedecnum = JEDEC_ATMEL;
11827 tg3_flag_set(tp, NVRAM_BUFFERED);
11828 tg3_flag_set(tp, FLASH);
11829 tp->nvram_pagesize = 264;
11830 break;
11831 case FLASH_5752VENDOR_ST_M45PE10:
11832 case FLASH_5752VENDOR_ST_M45PE20:
11833 case FLASH_5752VENDOR_ST_M45PE40:
11834 tp->nvram_jedecnum = JEDEC_ST;
11835 tg3_flag_set(tp, NVRAM_BUFFERED);
11836 tg3_flag_set(tp, FLASH);
11837 tp->nvram_pagesize = 256;
11838 break;
11842 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11844 u32 nvcfg1, protect = 0;
11846 nvcfg1 = tr32(NVRAM_CFG1);
11848 /* NVRAM protection for TPM */
11849 if (nvcfg1 & (1 << 27)) {
11850 tg3_flag_set(tp, PROTECTED_NVRAM);
11851 protect = 1;
11854 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11855 switch (nvcfg1) {
11856 case FLASH_5761VENDOR_ATMEL_ADB021D:
11857 case FLASH_5761VENDOR_ATMEL_ADB041D:
11858 case FLASH_5761VENDOR_ATMEL_ADB081D:
11859 case FLASH_5761VENDOR_ATMEL_ADB161D:
11860 case FLASH_5761VENDOR_ATMEL_MDB021D:
11861 case FLASH_5761VENDOR_ATMEL_MDB041D:
11862 case FLASH_5761VENDOR_ATMEL_MDB081D:
11863 case FLASH_5761VENDOR_ATMEL_MDB161D:
11864 tp->nvram_jedecnum = JEDEC_ATMEL;
11865 tg3_flag_set(tp, NVRAM_BUFFERED);
11866 tg3_flag_set(tp, FLASH);
11867 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
11868 tp->nvram_pagesize = 256;
11869 break;
11870 case FLASH_5761VENDOR_ST_A_M45PE20:
11871 case FLASH_5761VENDOR_ST_A_M45PE40:
11872 case FLASH_5761VENDOR_ST_A_M45PE80:
11873 case FLASH_5761VENDOR_ST_A_M45PE16:
11874 case FLASH_5761VENDOR_ST_M_M45PE20:
11875 case FLASH_5761VENDOR_ST_M_M45PE40:
11876 case FLASH_5761VENDOR_ST_M_M45PE80:
11877 case FLASH_5761VENDOR_ST_M_M45PE16:
11878 tp->nvram_jedecnum = JEDEC_ST;
11879 tg3_flag_set(tp, NVRAM_BUFFERED);
11880 tg3_flag_set(tp, FLASH);
11881 tp->nvram_pagesize = 256;
11882 break;
11885 if (protect) {
11886 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11887 } else {
11888 switch (nvcfg1) {
11889 case FLASH_5761VENDOR_ATMEL_ADB161D:
11890 case FLASH_5761VENDOR_ATMEL_MDB161D:
11891 case FLASH_5761VENDOR_ST_A_M45PE16:
11892 case FLASH_5761VENDOR_ST_M_M45PE16:
11893 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11894 break;
11895 case FLASH_5761VENDOR_ATMEL_ADB081D:
11896 case FLASH_5761VENDOR_ATMEL_MDB081D:
11897 case FLASH_5761VENDOR_ST_A_M45PE80:
11898 case FLASH_5761VENDOR_ST_M_M45PE80:
11899 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11900 break;
11901 case FLASH_5761VENDOR_ATMEL_ADB041D:
11902 case FLASH_5761VENDOR_ATMEL_MDB041D:
11903 case FLASH_5761VENDOR_ST_A_M45PE40:
11904 case FLASH_5761VENDOR_ST_M_M45PE40:
11905 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11906 break;
11907 case FLASH_5761VENDOR_ATMEL_ADB021D:
11908 case FLASH_5761VENDOR_ATMEL_MDB021D:
11909 case FLASH_5761VENDOR_ST_A_M45PE20:
11910 case FLASH_5761VENDOR_ST_M_M45PE20:
11911 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11912 break;
11917 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11919 tp->nvram_jedecnum = JEDEC_ATMEL;
11920 tg3_flag_set(tp, NVRAM_BUFFERED);
11921 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11924 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11926 u32 nvcfg1;
11928 nvcfg1 = tr32(NVRAM_CFG1);
11930 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11931 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11932 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11933 tp->nvram_jedecnum = JEDEC_ATMEL;
11934 tg3_flag_set(tp, NVRAM_BUFFERED);
11935 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11937 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11938 tw32(NVRAM_CFG1, nvcfg1);
11939 return;
11940 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11941 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11942 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11943 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11944 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11945 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11946 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11947 tp->nvram_jedecnum = JEDEC_ATMEL;
11948 tg3_flag_set(tp, NVRAM_BUFFERED);
11949 tg3_flag_set(tp, FLASH);
11951 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11952 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11953 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11954 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11955 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11956 break;
11957 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11958 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11959 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11960 break;
11961 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11962 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11963 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11964 break;
11966 break;
11967 case FLASH_5752VENDOR_ST_M45PE10:
11968 case FLASH_5752VENDOR_ST_M45PE20:
11969 case FLASH_5752VENDOR_ST_M45PE40:
11970 tp->nvram_jedecnum = JEDEC_ST;
11971 tg3_flag_set(tp, NVRAM_BUFFERED);
11972 tg3_flag_set(tp, FLASH);
11974 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11975 case FLASH_5752VENDOR_ST_M45PE10:
11976 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11977 break;
11978 case FLASH_5752VENDOR_ST_M45PE20:
11979 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11980 break;
11981 case FLASH_5752VENDOR_ST_M45PE40:
11982 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11983 break;
11985 break;
11986 default:
11987 tg3_flag_set(tp, NO_NVRAM);
11988 return;
11991 tg3_nvram_get_pagesize(tp, nvcfg1);
11992 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11993 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
11997 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
11999 u32 nvcfg1;
12001 nvcfg1 = tr32(NVRAM_CFG1);
12003 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12004 case FLASH_5717VENDOR_ATMEL_EEPROM:
12005 case FLASH_5717VENDOR_MICRO_EEPROM:
12006 tp->nvram_jedecnum = JEDEC_ATMEL;
12007 tg3_flag_set(tp, NVRAM_BUFFERED);
12008 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12010 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12011 tw32(NVRAM_CFG1, nvcfg1);
12012 return;
12013 case FLASH_5717VENDOR_ATMEL_MDB011D:
12014 case FLASH_5717VENDOR_ATMEL_ADB011B:
12015 case FLASH_5717VENDOR_ATMEL_ADB011D:
12016 case FLASH_5717VENDOR_ATMEL_MDB021D:
12017 case FLASH_5717VENDOR_ATMEL_ADB021B:
12018 case FLASH_5717VENDOR_ATMEL_ADB021D:
12019 case FLASH_5717VENDOR_ATMEL_45USPT:
12020 tp->nvram_jedecnum = JEDEC_ATMEL;
12021 tg3_flag_set(tp, NVRAM_BUFFERED);
12022 tg3_flag_set(tp, FLASH);
12024 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12025 case FLASH_5717VENDOR_ATMEL_MDB021D:
12026 /* Detect size with tg3_nvram_get_size() */
12027 break;
12028 case FLASH_5717VENDOR_ATMEL_ADB021B:
12029 case FLASH_5717VENDOR_ATMEL_ADB021D:
12030 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12031 break;
12032 default:
12033 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12034 break;
12036 break;
12037 case FLASH_5717VENDOR_ST_M_M25PE10:
12038 case FLASH_5717VENDOR_ST_A_M25PE10:
12039 case FLASH_5717VENDOR_ST_M_M45PE10:
12040 case FLASH_5717VENDOR_ST_A_M45PE10:
12041 case FLASH_5717VENDOR_ST_M_M25PE20:
12042 case FLASH_5717VENDOR_ST_A_M25PE20:
12043 case FLASH_5717VENDOR_ST_M_M45PE20:
12044 case FLASH_5717VENDOR_ST_A_M45PE20:
12045 case FLASH_5717VENDOR_ST_25USPT:
12046 case FLASH_5717VENDOR_ST_45USPT:
12047 tp->nvram_jedecnum = JEDEC_ST;
12048 tg3_flag_set(tp, NVRAM_BUFFERED);
12049 tg3_flag_set(tp, FLASH);
12051 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12052 case FLASH_5717VENDOR_ST_M_M25PE20:
12053 case FLASH_5717VENDOR_ST_M_M45PE20:
12054 /* Detect size with tg3_nvram_get_size() */
12055 break;
12056 case FLASH_5717VENDOR_ST_A_M25PE20:
12057 case FLASH_5717VENDOR_ST_A_M45PE20:
12058 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12059 break;
12060 default:
12061 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12062 break;
12064 break;
12065 default:
12066 tg3_flag_set(tp, NO_NVRAM);
12067 return;
12070 tg3_nvram_get_pagesize(tp, nvcfg1);
12071 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12072 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12075 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12077 u32 nvcfg1, nvmpinstrp;
12079 nvcfg1 = tr32(NVRAM_CFG1);
12080 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12082 switch (nvmpinstrp) {
12083 case FLASH_5720_EEPROM_HD:
12084 case FLASH_5720_EEPROM_LD:
12085 tp->nvram_jedecnum = JEDEC_ATMEL;
12086 tg3_flag_set(tp, NVRAM_BUFFERED);
12088 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12089 tw32(NVRAM_CFG1, nvcfg1);
12090 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12091 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12092 else
12093 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12094 return;
12095 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12096 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12097 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12098 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12099 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12100 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12101 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12102 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12103 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12104 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12105 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12106 case FLASH_5720VENDOR_ATMEL_45USPT:
12107 tp->nvram_jedecnum = JEDEC_ATMEL;
12108 tg3_flag_set(tp, NVRAM_BUFFERED);
12109 tg3_flag_set(tp, FLASH);
12111 switch (nvmpinstrp) {
12112 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12113 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12114 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12115 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12116 break;
12117 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12118 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12119 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12120 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12121 break;
12122 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12123 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12124 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12125 break;
12126 default:
12127 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12128 break;
12130 break;
12131 case FLASH_5720VENDOR_M_ST_M25PE10:
12132 case FLASH_5720VENDOR_M_ST_M45PE10:
12133 case FLASH_5720VENDOR_A_ST_M25PE10:
12134 case FLASH_5720VENDOR_A_ST_M45PE10:
12135 case FLASH_5720VENDOR_M_ST_M25PE20:
12136 case FLASH_5720VENDOR_M_ST_M45PE20:
12137 case FLASH_5720VENDOR_A_ST_M25PE20:
12138 case FLASH_5720VENDOR_A_ST_M45PE20:
12139 case FLASH_5720VENDOR_M_ST_M25PE40:
12140 case FLASH_5720VENDOR_M_ST_M45PE40:
12141 case FLASH_5720VENDOR_A_ST_M25PE40:
12142 case FLASH_5720VENDOR_A_ST_M45PE40:
12143 case FLASH_5720VENDOR_M_ST_M25PE80:
12144 case FLASH_5720VENDOR_M_ST_M45PE80:
12145 case FLASH_5720VENDOR_A_ST_M25PE80:
12146 case FLASH_5720VENDOR_A_ST_M45PE80:
12147 case FLASH_5720VENDOR_ST_25USPT:
12148 case FLASH_5720VENDOR_ST_45USPT:
12149 tp->nvram_jedecnum = JEDEC_ST;
12150 tg3_flag_set(tp, NVRAM_BUFFERED);
12151 tg3_flag_set(tp, FLASH);
12153 switch (nvmpinstrp) {
12154 case FLASH_5720VENDOR_M_ST_M25PE20:
12155 case FLASH_5720VENDOR_M_ST_M45PE20:
12156 case FLASH_5720VENDOR_A_ST_M25PE20:
12157 case FLASH_5720VENDOR_A_ST_M45PE20:
12158 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12159 break;
12160 case FLASH_5720VENDOR_M_ST_M25PE40:
12161 case FLASH_5720VENDOR_M_ST_M45PE40:
12162 case FLASH_5720VENDOR_A_ST_M25PE40:
12163 case FLASH_5720VENDOR_A_ST_M45PE40:
12164 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12165 break;
12166 case FLASH_5720VENDOR_M_ST_M25PE80:
12167 case FLASH_5720VENDOR_M_ST_M45PE80:
12168 case FLASH_5720VENDOR_A_ST_M25PE80:
12169 case FLASH_5720VENDOR_A_ST_M45PE80:
12170 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12171 break;
12172 default:
12173 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12174 break;
12176 break;
12177 default:
12178 tg3_flag_set(tp, NO_NVRAM);
12179 return;
12182 tg3_nvram_get_pagesize(tp, nvcfg1);
12183 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12184 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12187 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12188 static void __devinit tg3_nvram_init(struct tg3 *tp)
12190 tw32_f(GRC_EEPROM_ADDR,
12191 (EEPROM_ADDR_FSM_RESET |
12192 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12193 EEPROM_ADDR_CLKPERD_SHIFT)));
12195 msleep(1);
12197 /* Enable seeprom accesses. */
12198 tw32_f(GRC_LOCAL_CTRL,
12199 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12200 udelay(100);
12202 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12203 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12204 tg3_flag_set(tp, NVRAM);
12206 if (tg3_nvram_lock(tp)) {
12207 netdev_warn(tp->dev,
12208 "Cannot get nvram lock, %s failed\n",
12209 __func__);
12210 return;
12212 tg3_enable_nvram_access(tp);
12214 tp->nvram_size = 0;
12216 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12217 tg3_get_5752_nvram_info(tp);
12218 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12219 tg3_get_5755_nvram_info(tp);
12220 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12221 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12222 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12223 tg3_get_5787_nvram_info(tp);
12224 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12225 tg3_get_5761_nvram_info(tp);
12226 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12227 tg3_get_5906_nvram_info(tp);
12228 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12229 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12230 tg3_get_57780_nvram_info(tp);
12231 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12232 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12233 tg3_get_5717_nvram_info(tp);
12234 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12235 tg3_get_5720_nvram_info(tp);
12236 else
12237 tg3_get_nvram_info(tp);
12239 if (tp->nvram_size == 0)
12240 tg3_get_nvram_size(tp);
12242 tg3_disable_nvram_access(tp);
12243 tg3_nvram_unlock(tp);
12245 } else {
12246 tg3_flag_clear(tp, NVRAM);
12247 tg3_flag_clear(tp, NVRAM_BUFFERED);
12249 tg3_get_eeprom_size(tp);
12250 }
12251 }
12253 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12254 u32 offset, u32 len, u8 *buf)
12255 {
12256 int i, j, rc = 0;
12257 u32 val;
12259 for (i = 0; i < len; i += 4) {
12260 u32 addr;
12261 __be32 data;
12263 addr = offset + i;
12265 memcpy(&data, buf + i, 4);
12267 /*
12268 * The SEEPROM interface expects the data to always be opposite
12269 * the native endian format. We accomplish this by reversing
12270 * all the operations that would have been performed on the
12271 * data from a call to tg3_nvram_read_be32().
12272 */
12273 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12275 val = tr32(GRC_EEPROM_ADDR);
12276 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12278 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12279 EEPROM_ADDR_READ);
12280 tw32(GRC_EEPROM_ADDR, val |
12281 (0 << EEPROM_ADDR_DEVID_SHIFT) |
12282 (addr & EEPROM_ADDR_ADDR_MASK) |
12283 EEPROM_ADDR_START |
12284 EEPROM_ADDR_WRITE);
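/* Poll for completion: up to 1000 reads of GRC_EEPROM_ADDR with a
 * 1 ms sleep between them, i.e. a timeout on the order of one
 * second for each 32-bit word written.
 */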
12286 for (j = 0; j < 1000; j++) {
12287 val = tr32(GRC_EEPROM_ADDR);
12289 if (val & EEPROM_ADDR_COMPLETE)
12290 break;
12291 msleep(1);
12292 }
12293 if (!(val & EEPROM_ADDR_COMPLETE)) {
12294 rc = -EBUSY;
12295 break;
12296 }
12297 }
12299 return rc;
12300 }
12302 /* offset and length are dword aligned */
12303 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12304 u8 *buf)
12305 {
12306 int ret = 0;
12307 u32 pagesize = tp->nvram_pagesize;
12308 u32 pagemask = pagesize - 1;
12309 u32 nvram_cmd;
12310 u8 *tmp;
12312 tmp = kmalloc(pagesize, GFP_KERNEL);
12313 if (tmp == NULL)
12314 return -ENOMEM;
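/* Unbuffered flash parts are written one full page at a time:
 * read the page containing the target offset into the bounce
 * buffer, merge in the caller's data, erase the page, then
 * program the merged page back word by word.
 */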
12316 while (len) {
12317 int j;
12318 u32 phy_addr, page_off, size;
12320 phy_addr = offset & ~pagemask;
12322 for (j = 0; j < pagesize; j += 4) {
12323 ret = tg3_nvram_read_be32(tp, phy_addr + j,
12324 (__be32 *) (tmp + j));
12325 if (ret)
12326 break;
12327 }
12328 if (ret)
12329 break;
12331 page_off = offset & pagemask;
12332 size = pagesize;
12333 if (len < size)
12334 size = len;
12336 len -= size;
12338 memcpy(tmp + page_off, buf, size);
12340 offset = offset + (pagesize - page_off);
12342 tg3_enable_nvram_access(tp);
12344 /*
12345 * Before we can erase the flash page, we need
12346 * to issue a special "write enable" command.
12347 */
12348 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12350 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12351 break;
12353 /* Erase the target page */
12354 tw32(NVRAM_ADDR, phy_addr);
12356 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12357 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12359 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12360 break;
12362 /* Issue another write enable to start the write. */
12363 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12365 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12366 break;
12368 for (j = 0; j < pagesize; j += 4) {
12369 __be32 data;
12371 data = *((__be32 *) (tmp + j));
12373 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12375 tw32(NVRAM_ADDR, phy_addr + j);
12377 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12378 NVRAM_CMD_WR;
12380 if (j == 0)
12381 nvram_cmd |= NVRAM_CMD_FIRST;
12382 else if (j == (pagesize - 4))
12383 nvram_cmd |= NVRAM_CMD_LAST;
12385 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12386 break;
12387 }
12388 if (ret)
12389 break;
12390 }
12392 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12393 tg3_nvram_exec_cmd(tp, nvram_cmd);
12395 kfree(tmp);
12397 return ret;
12398 }
12400 /* offset and length are dword aligned */
12401 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12402 u8 *buf)
12403 {
12404 int i, ret = 0;
12406 for (i = 0; i < len; i += 4, offset += 4) {
12407 u32 page_off, phy_addr, nvram_cmd;
12408 __be32 data;
12410 memcpy(&data, buf + i, 4);
12411 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12413 page_off = offset % tp->nvram_pagesize;
12415 phy_addr = tg3_nvram_phys_addr(tp, offset);
12417 tw32(NVRAM_ADDR, phy_addr);
12419 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
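/* NVRAM_CMD_FIRST/NVRAM_CMD_LAST bracket each programming burst:
 * FIRST is set on a page boundary (or the very first word) and
 * LAST on the final word of a page or of the whole transfer.
 */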
12421 if (page_off == 0 || i == 0)
12422 nvram_cmd |= NVRAM_CMD_FIRST;
12423 if (page_off == (tp->nvram_pagesize - 4))
12424 nvram_cmd |= NVRAM_CMD_LAST;
12426 if (i == (len - 4))
12427 nvram_cmd |= NVRAM_CMD_LAST;
12429 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12430 !tg3_flag(tp, 5755_PLUS) &&
12431 (tp->nvram_jedecnum == JEDEC_ST) &&
12432 (nvram_cmd & NVRAM_CMD_FIRST)) {
12434 if ((ret = tg3_nvram_exec_cmd(tp,
12435 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12436 NVRAM_CMD_DONE)))
12438 break;
12439 }
12440 if (!tg3_flag(tp, FLASH)) {
12441 /* We always do complete word writes to eeprom. */
12442 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12443 }
12445 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12446 break;
12447 }
12448 return ret;
12449 }
12451 /* offset and length are dword aligned */
12452 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12453 {
12454 int ret;
12456 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12457 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12458 ~GRC_LCLCTRL_GPIO_OUTPUT1);
12459 udelay(40);
12460 }
12462 if (!tg3_flag(tp, NVRAM)) {
12463 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12464 } else {
12465 u32 grc_mode;
12467 ret = tg3_nvram_lock(tp);
12468 if (ret)
12469 return ret;
12471 tg3_enable_nvram_access(tp);
12472 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12473 tw32(NVRAM_WRITE1, 0x406);
12475 grc_mode = tr32(GRC_MODE);
12476 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12478 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12479 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12480 buf);
12481 } else {
12482 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12483 buf);
12484 }
12486 grc_mode = tr32(GRC_MODE);
12487 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12489 tg3_disable_nvram_access(tp);
12490 tg3_nvram_unlock(tp);
12491 }
12493 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12494 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12495 udelay(40);
12496 }
12498 return ret;
12499 }
12501 struct subsys_tbl_ent {
12502 u16 subsys_vendor, subsys_devid;
12503 u32 phy_id;
12504 };
12506 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12507 /* Broadcom boards. */
12508 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12509 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12510 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12511 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12512 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12513 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12514 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12515 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12516 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12517 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12518 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12519 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12520 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12521 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12522 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12523 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12524 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12525 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12526 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12527 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12528 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12529 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12531 /* 3com boards. */
12532 { TG3PCI_SUBVENDOR_ID_3COM,
12533 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12534 { TG3PCI_SUBVENDOR_ID_3COM,
12535 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12536 { TG3PCI_SUBVENDOR_ID_3COM,
12537 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12538 { TG3PCI_SUBVENDOR_ID_3COM,
12539 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12540 { TG3PCI_SUBVENDOR_ID_3COM,
12541 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12543 /* DELL boards. */
12544 { TG3PCI_SUBVENDOR_ID_DELL,
12545 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12546 { TG3PCI_SUBVENDOR_ID_DELL,
12547 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12548 { TG3PCI_SUBVENDOR_ID_DELL,
12549 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12550 { TG3PCI_SUBVENDOR_ID_DELL,
12551 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12553 /* Compaq boards. */
12554 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12555 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12556 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12557 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12558 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12559 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12560 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12561 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12562 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12563 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12565 /* IBM boards. */
12566 { TG3PCI_SUBVENDOR_ID_IBM,
12567 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12568 };
12570 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12571 {
12572 int i;
12574 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12575 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12576 tp->pdev->subsystem_vendor) &&
12577 (subsys_id_to_phy_id[i].subsys_devid ==
12578 tp->pdev->subsystem_device))
12579 return &subsys_id_to_phy_id[i];
12580 }
12581 return NULL;
12582 }
12584 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12585 {
12586 u32 val;
12587 u16 pmcsr;
12589 /* On some early chips the SRAM cannot be accessed in D3hot state,
12590 * so we need to make sure we're in D0.
12591 */
12592 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
12593 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
12594 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
12595 msleep(1);
12597 /* Make sure register accesses (indirect or otherwise)
12598 * will function correctly.
12599 */
12600 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12601 tp->misc_host_ctrl);
12603 /* The memory arbiter has to be enabled in order for SRAM accesses
12604 * to succeed. Normally on powerup the tg3 chip firmware will make
12605 * sure it is enabled, but other entities such as system netboot
12606 * code might disable it.
12607 */
12608 val = tr32(MEMARB_MODE);
12609 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
12611 tp->phy_id = TG3_PHY_ID_INVALID;
12612 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12614 /* Assume an onboard device and WOL capable by default. */
12615 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12616 tg3_flag_set(tp, WOL_CAP);
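/* 5906 parts expose their configuration through the VCPU shadow
 * register below rather than the NIC SRAM signature area that is
 * parsed for all other chips.
 */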
12618 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12619 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12620 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12621 tg3_flag_set(tp, IS_NIC);
12622 }
12623 val = tr32(VCPU_CFGSHDW);
12624 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12625 tg3_flag_set(tp, ASPM_WORKAROUND);
12626 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12627 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12628 tg3_flag_set(tp, WOL_ENABLE);
12629 device_set_wakeup_enable(&tp->pdev->dev, true);
12630 }
12631 goto done;
12632 }
12634 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12635 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12636 u32 nic_cfg, led_cfg;
12637 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12638 int eeprom_phy_serdes = 0;
12640 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12641 tp->nic_sram_data_cfg = nic_cfg;
12643 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12644 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12645 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12646 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12647 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
12648 (ver > 0) && (ver < 0x100))
12649 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12651 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12652 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12654 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12655 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12656 eeprom_phy_serdes = 1;
12658 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12659 if (nic_phy_id != 0) {
12660 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12661 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12663 eeprom_phy_id = (id1 >> 16) << 10;
12664 eeprom_phy_id |= (id2 & 0xfc00) << 16;
12665 eeprom_phy_id |= (id2 & 0x03ff) << 0;
12666 } else
12667 eeprom_phy_id = 0;
12669 tp->phy_id = eeprom_phy_id;
12670 if (eeprom_phy_serdes) {
12671 if (!tg3_flag(tp, 5705_PLUS))
12672 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12673 else
12674 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12675 }
12677 if (tg3_flag(tp, 5750_PLUS))
12678 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12679 SHASTA_EXT_LED_MODE_MASK);
12680 else
12681 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12683 switch (led_cfg) {
12684 default:
12685 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12686 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12687 break;
12689 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12690 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12691 break;
12693 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12694 tp->led_ctrl = LED_CTRL_MODE_MAC;
12696 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12697 * read on some older 5700/5701 bootcode.
12698 */
12699 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12700 ASIC_REV_5700 ||
12701 GET_ASIC_REV(tp->pci_chip_rev_id) ==
12702 ASIC_REV_5701)
12703 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12705 break;
12707 case SHASTA_EXT_LED_SHARED:
12708 tp->led_ctrl = LED_CTRL_MODE_SHARED;
12709 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12710 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12711 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12712 LED_CTRL_MODE_PHY_2);
12713 break;
12715 case SHASTA_EXT_LED_MAC:
12716 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12717 break;
12719 case SHASTA_EXT_LED_COMBO:
12720 tp->led_ctrl = LED_CTRL_MODE_COMBO;
12721 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12722 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12723 LED_CTRL_MODE_PHY_2);
12724 break;
12726 }
12728 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12729 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12730 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12731 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12733 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12734 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12736 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12737 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12738 if ((tp->pdev->subsystem_vendor ==
12739 PCI_VENDOR_ID_ARIMA) &&
12740 (tp->pdev->subsystem_device == 0x205a ||
12741 tp->pdev->subsystem_device == 0x2063))
12742 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12743 } else {
12744 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12745 tg3_flag_set(tp, IS_NIC);
12746 }
12748 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12749 tg3_flag_set(tp, ENABLE_ASF);
12750 if (tg3_flag(tp, 5750_PLUS))
12751 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
12752 }
12754 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12755 tg3_flag(tp, 5750_PLUS))
12756 tg3_flag_set(tp, ENABLE_APE);
12758 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12759 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12760 tg3_flag_clear(tp, WOL_CAP);
12762 if (tg3_flag(tp, WOL_CAP) &&
12763 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
12764 tg3_flag_set(tp, WOL_ENABLE);
12765 device_set_wakeup_enable(&tp->pdev->dev, true);
12766 }
12768 if (cfg2 & (1 << 17))
12769 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12771 /* serdes signal pre-emphasis in register 0x590 set by */
12772 /* bootcode if bit 18 is set */
12773 if (cfg2 & (1 << 18))
12774 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12776 if ((tg3_flag(tp, 57765_PLUS) ||
12777 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12778 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12779 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12780 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12782 if (tg3_flag(tp, PCI_EXPRESS) &&
12783 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12784 !tg3_flag(tp, 57765_PLUS)) {
12785 u32 cfg3;
12787 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12788 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12789 tg3_flag_set(tp, ASPM_WORKAROUND);
12790 }
12792 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12793 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
12794 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12795 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
12796 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12797 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
12798 }
12799 done:
12800 if (tg3_flag(tp, WOL_CAP))
12801 device_set_wakeup_enable(&tp->pdev->dev,
12802 tg3_flag(tp, WOL_ENABLE));
12803 else
12804 device_set_wakeup_capable(&tp->pdev->dev, false);
12805 }
12807 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12808 {
12809 int i;
12810 u32 val;
12812 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12813 tw32(OTP_CTRL, cmd);
12815 /* Wait for up to 1 ms for command to execute. */
12816 for (i = 0; i < 100; i++) {
12817 val = tr32(OTP_STATUS);
12818 if (val & OTP_STATUS_CMD_DONE)
12819 break;
12820 udelay(10);
12821 }
12823 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12824 }
12826 /* Read the gphy configuration from the OTP region of the chip. The gphy
12827 * configuration is a 32-bit value that straddles the alignment boundary.
12828 * We do two 32-bit reads and then shift and merge the results.
12829 */
12830 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12831 {
12832 u32 bhalf_otp, thalf_otp;
12834 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12836 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
12837 return 0;
12839 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12841 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12842 return 0;
12844 thalf_otp = tr32(OTP_READ_DATA);
12846 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12848 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12849 return 0;
12851 bhalf_otp = tr32(OTP_READ_DATA);
12853 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
12854 }
12856 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
12857 {
12858 u32 adv = ADVERTISED_Autoneg |
12859 ADVERTISED_Pause;
12861 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12862 adv |= ADVERTISED_1000baseT_Half |
12863 ADVERTISED_1000baseT_Full;
12865 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12866 adv |= ADVERTISED_100baseT_Half |
12867 ADVERTISED_100baseT_Full |
12868 ADVERTISED_10baseT_Half |
12869 ADVERTISED_10baseT_Full |
12870 ADVERTISED_TP;
12871 else
12872 adv |= ADVERTISED_FIBRE;
12874 tp->link_config.advertising = adv;
12875 tp->link_config.speed = SPEED_INVALID;
12876 tp->link_config.duplex = DUPLEX_INVALID;
12877 tp->link_config.autoneg = AUTONEG_ENABLE;
12878 tp->link_config.active_speed = SPEED_INVALID;
12879 tp->link_config.active_duplex = DUPLEX_INVALID;
12880 tp->link_config.orig_speed = SPEED_INVALID;
12881 tp->link_config.orig_duplex = DUPLEX_INVALID;
12882 tp->link_config.orig_autoneg = AUTONEG_INVALID;
12883 }
12885 static int __devinit tg3_phy_probe(struct tg3 *tp)
12886 {
12887 u32 hw_phy_id_1, hw_phy_id_2;
12888 u32 hw_phy_id, hw_phy_id_masked;
12889 int err;
12891 /* flow control autonegotiation is default behavior */
12892 tg3_flag_set(tp, PAUSE_AUTONEG);
12893 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
12895 if (tg3_flag(tp, USE_PHYLIB))
12896 return tg3_phy_init(tp);
12898 /* Reading the PHY ID register can conflict with ASF
12899 * firmware access to the PHY hardware.
12900 */
12901 err = 0;
12902 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
12903 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
12904 } else {
12905 /* Now read the physical PHY_ID from the chip and verify
12906 * that it is sane. If it doesn't look good, we fall back
12907 * to either the hard-coded table based PHY_ID and failing
12908 * that the value found in the eeprom area.
12909 */
12910 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
12911 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
12913 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
12914 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
12915 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
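/* The two MII ID words are repacked into the driver's internal
 * phy_id layout: the shifts above rearrange the OUI, model and
 * revision fields so the result can be compared against the
 * TG3_PHY_ID_* constants.
 */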
12917 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
12918 }
12920 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
12921 tp->phy_id = hw_phy_id;
12922 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
12923 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12924 else
12925 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
12926 } else {
12927 if (tp->phy_id != TG3_PHY_ID_INVALID) {
12928 /* Do nothing, phy ID already set up in
12929 * tg3_get_eeprom_hw_cfg().
12930 */
12931 } else {
12932 struct subsys_tbl_ent *p;
12934 /* No eeprom signature? Try the hardcoded
12935 * subsys device table.
12937 p = tg3_lookup_by_subsys(tp);
12938 if (!p)
12939 return -ENODEV;
12941 tp->phy_id = p->phy_id;
12942 if (!tp->phy_id ||
12943 tp->phy_id == TG3_PHY_ID_BCM8002)
12944 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12945 }
12946 }
12948 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12949 ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
12950 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
12951 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12952 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
12953 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
12955 tg3_phy_init_link_config(tp);
12957 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12958 !tg3_flag(tp, ENABLE_APE) &&
12959 !tg3_flag(tp, ENABLE_ASF)) {
12960 u32 bmsr, mask;
12962 tg3_readphy(tp, MII_BMSR, &bmsr);
12963 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
12964 (bmsr & BMSR_LSTATUS))
12965 goto skip_phy_reset;
12967 err = tg3_phy_reset(tp);
12968 if (err)
12969 return err;
12971 tg3_phy_set_wirespeed(tp);
12973 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12974 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12975 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
12976 if (!tg3_copper_is_advertising_all(tp, mask)) {
12977 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
12978 tp->link_config.flowctrl);
12980 tg3_writephy(tp, MII_BMCR,
12981 BMCR_ANENABLE | BMCR_ANRESTART);
12982 }
12983 }
12985 skip_phy_reset:
12986 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
12987 err = tg3_init_5401phy_dsp(tp);
12988 if (err)
12989 return err;
12991 err = tg3_init_5401phy_dsp(tp);
12992 }
12994 return err;
12995 }
12997 static void __devinit tg3_read_vpd(struct tg3 *tp)
12998 {
12999 u8 *vpd_data;
13000 unsigned int block_end, rosize, len;
13001 int j, i = 0;
13003 vpd_data = (u8 *)tg3_vpd_readblock(tp);
13004 if (!vpd_data)
13005 goto out_no_vpd;
13007 i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
13008 PCI_VPD_LRDT_RO_DATA);
13009 if (i < 0)
13010 goto out_not_found;
13012 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13013 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13014 i += PCI_VPD_LRDT_TAG_SIZE;
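/* 'i' now indexes the first keyword header inside the read-only
 * VPD section and 'block_end' marks where that section ends, so
 * the keyword searches below stay within bounds.
 */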
13016 if (block_end > TG3_NVM_VPD_LEN)
13017 goto out_not_found;
13019 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13020 PCI_VPD_RO_KEYWORD_MFR_ID);
13021 if (j > 0) {
13022 len = pci_vpd_info_field_size(&vpd_data[j]);
13024 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13025 if (j + len > block_end || len != 4 ||
13026 memcmp(&vpd_data[j], "1028", 4))
13027 goto partno;
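/* "1028" is PCI vendor ID 0x1028 (Dell) rendered as ASCII; only
 * such boards are expected to carry a bootcode version in the
 * VENDOR0 keyword read next.
 */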
13029 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13030 PCI_VPD_RO_KEYWORD_VENDOR0);
13031 if (j < 0)
13032 goto partno;
13034 len = pci_vpd_info_field_size(&vpd_data[j]);
13036 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13037 if (j + len > block_end)
13038 goto partno;
13040 memcpy(tp->fw_ver, &vpd_data[j], len);
13041 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
13042 }
13044 partno:
13045 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13046 PCI_VPD_RO_KEYWORD_PARTNO);
13047 if (i < 0)
13048 goto out_not_found;
13050 len = pci_vpd_info_field_size(&vpd_data[i]);
13052 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13053 if (len > TG3_BPN_SIZE ||
13054 (len + i) > TG3_NVM_VPD_LEN)
13055 goto out_not_found;
13057 memcpy(tp->board_part_number, &vpd_data[i], len);
13059 out_not_found:
13060 kfree(vpd_data);
13061 if (tp->board_part_number[0])
13062 return;
13064 out_no_vpd:
13065 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13066 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13067 strcpy(tp->board_part_number, "BCM5717");
13068 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13069 strcpy(tp->board_part_number, "BCM5718");
13070 else
13071 goto nomatch;
13072 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13073 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13074 strcpy(tp->board_part_number, "BCM57780");
13075 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13076 strcpy(tp->board_part_number, "BCM57760");
13077 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13078 strcpy(tp->board_part_number, "BCM57790");
13079 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13080 strcpy(tp->board_part_number, "BCM57788");
13081 else
13082 goto nomatch;
13083 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13084 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13085 strcpy(tp->board_part_number, "BCM57761");
13086 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13087 strcpy(tp->board_part_number, "BCM57765");
13088 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13089 strcpy(tp->board_part_number, "BCM57781");
13090 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13091 strcpy(tp->board_part_number, "BCM57785");
13092 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13093 strcpy(tp->board_part_number, "BCM57791");
13094 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13095 strcpy(tp->board_part_number, "BCM57795");
13096 else
13097 goto nomatch;
13098 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13099 strcpy(tp->board_part_number, "BCM95906");
13100 } else {
13101 nomatch:
13102 strcpy(tp->board_part_number, "none");
13103 }
13104 }
13106 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13107 {
13108 u32 val;
13110 if (tg3_nvram_read(tp, offset, &val) ||
13111 (val & 0xfc000000) != 0x0c000000 ||
13112 tg3_nvram_read(tp, offset + 4, &val) ||
13113 val != 0)
13114 return 0;
13116 return 1;
13117 }
13119 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13120 {
13121 u32 val, offset, start, ver_offset;
13122 int i, dst_off;
13123 bool newver = false;
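/* Bootcode versions come in two layouts: newer images store a
 * 16-byte ASCII string located via the pointer at offset + 8,
 * while older images pack major/minor numbers into the pointer
 * table entry read in the else branch below.
 */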
13125 if (tg3_nvram_read(tp, 0xc, &offset) ||
13126 tg3_nvram_read(tp, 0x4, &start))
13127 return;
13129 offset = tg3_nvram_logical_addr(tp, offset);
13131 if (tg3_nvram_read(tp, offset, &val))
13132 return;
13134 if ((val & 0xfc000000) == 0x0c000000) {
13135 if (tg3_nvram_read(tp, offset + 4, &val))
13136 return;
13138 if (val == 0)
13139 newver = true;
13140 }
13142 dst_off = strlen(tp->fw_ver);
13144 if (newver) {
13145 if (TG3_VER_SIZE - dst_off < 16 ||
13146 tg3_nvram_read(tp, offset + 8, &ver_offset))
13147 return;
13149 offset = offset + ver_offset - start;
13150 for (i = 0; i < 16; i += 4) {
13151 __be32 v;
13152 if (tg3_nvram_read_be32(tp, offset + i, &v))
13153 return;
13155 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13156 }
13157 } else {
13158 u32 major, minor;
13160 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13161 return;
13163 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13164 TG3_NVM_BCVER_MAJSFT;
13165 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13166 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13167 "v%d.%02d", major, minor);
13171 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13173 u32 val, major, minor;
13175 /* Use native endian representation */
13176 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13177 return;
13179 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13180 TG3_NVM_HWSB_CFG1_MAJSFT;
13181 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13182 TG3_NVM_HWSB_CFG1_MINSFT;
13184 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13185 }
13187 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13188 {
13189 u32 offset, major, minor, build;
13191 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13193 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13194 return;
13196 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13197 case TG3_EEPROM_SB_REVISION_0:
13198 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13199 break;
13200 case TG3_EEPROM_SB_REVISION_2:
13201 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13202 break;
13203 case TG3_EEPROM_SB_REVISION_3:
13204 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13205 break;
13206 case TG3_EEPROM_SB_REVISION_4:
13207 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13208 break;
13209 case TG3_EEPROM_SB_REVISION_5:
13210 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13211 break;
13212 case TG3_EEPROM_SB_REVISION_6:
13213 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13214 break;
13215 default:
13216 return;
13217 }
13219 if (tg3_nvram_read(tp, offset, &val))
13220 return;
13222 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13223 TG3_EEPROM_SB_EDH_BLD_SHFT;
13224 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13225 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13226 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13228 if (minor > 99 || build > 26)
13229 return;
13231 offset = strlen(tp->fw_ver);
13232 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13233 " v%d.%02d", major, minor);
13235 if (build > 0) {
13236 offset = strlen(tp->fw_ver);
13237 if (offset < TG3_VER_SIZE - 1)
13238 tp->fw_ver[offset] = 'a' + build - 1;
13239 }
13240 }
13242 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13243 {
13244 u32 val, offset, start;
13245 int i, vlen;
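/* Walk the NVRAM directory for an ASF initialization entry; each
 * entry is TG3_NVM_DIRENT_SIZE bytes and its type sits in the
 * high bits of the first word.
 */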
13247 for (offset = TG3_NVM_DIR_START;
13248 offset < TG3_NVM_DIR_END;
13249 offset += TG3_NVM_DIRENT_SIZE) {
13250 if (tg3_nvram_read(tp, offset, &val))
13251 return;
13253 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13254 break;
13255 }
13257 if (offset == TG3_NVM_DIR_END)
13258 return;
13260 if (!tg3_flag(tp, 5705_PLUS))
13261 start = 0x08000000;
13262 else if (tg3_nvram_read(tp, offset - 4, &start))
13263 return;
13265 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13266 !tg3_fw_img_is_valid(tp, offset) ||
13267 tg3_nvram_read(tp, offset + 8, &val))
13268 return;
13270 offset += val - start;
13272 vlen = strlen(tp->fw_ver);
13274 tp->fw_ver[vlen++] = ',';
13275 tp->fw_ver[vlen++] = ' ';
13277 for (i = 0; i < 4; i++) {
13278 __be32 v;
13279 if (tg3_nvram_read_be32(tp, offset, &v))
13280 return;
13282 offset += sizeof(v);
13284 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13285 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13286 break;
13287 }
13289 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13290 vlen += sizeof(v);
13291 }
13292 }
13294 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13295 {
13296 int vlen;
13297 u32 apedata;
13298 char *fwtype;
13300 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13301 return;
13303 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13304 if (apedata != APE_SEG_SIG_MAGIC)
13305 return;
13307 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13308 if (!(apedata & APE_FW_STATUS_READY))
13309 return;
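/* The APE management firmware is up; its version word packs
 * major/minor/revision/build fields that are decoded below.
 */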
13311 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13313 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13314 tg3_flag_set(tp, APE_HAS_NCSI);
13315 fwtype = "NCSI";
13316 } else {
13317 fwtype = "DASH";
13320 vlen = strlen(tp->fw_ver);
13322 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13323 fwtype,
13324 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13325 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13326 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13327 (apedata & APE_FW_VERSION_BLDMSK));
13328 }
13330 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13331 {
13332 u32 val;
13333 bool vpd_vers = false;
13335 if (tp->fw_ver[0] != 0)
13336 vpd_vers = true;
13338 if (tg3_flag(tp, NO_NVRAM)) {
13339 strcat(tp->fw_ver, "sb");
13340 return;
13341 }
13343 if (tg3_nvram_read(tp, 0, &val))
13344 return;
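/* The first NVRAM word identifies the image layout: a full
 * bootcode image, a selfboot (sb) image or a hardware selfboot
 * (hwsb) image, each carrying its version in a different format.
 */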
13346 if (val == TG3_EEPROM_MAGIC)
13347 tg3_read_bc_ver(tp);
13348 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13349 tg3_read_sb_ver(tp, val);
13350 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13351 tg3_read_hwsb_ver(tp);
13352 else
13353 return;
13355 if (!tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || vpd_vers)
13356 goto done;
13358 tg3_read_mgmtfw_ver(tp);
13360 done:
13361 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13362 }
13364 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13366 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13367 {
13368 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13369 return TG3_RX_RET_MAX_SIZE_5717;
13370 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13371 return TG3_RX_RET_MAX_SIZE_5700;
13372 else
13373 return TG3_RX_RET_MAX_SIZE_5705;
13374 }
13376 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13377 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13378 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13379 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13380 { },
13381 };
13383 static int __devinit tg3_get_invariants(struct tg3 *tp)
13384 {
13385 u32 misc_ctrl_reg;
13386 u32 pci_state_reg, grc_misc_cfg;
13387 u32 val;
13388 u16 pci_cmd;
13389 int err;
13391 /* Force memory write invalidate off. If we leave it on,
13392 * then on 5700_BX chips we have to enable a workaround.
13393 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13394 * to match the cacheline size. The Broadcom driver has this
13395 * workaround but turns MWI off all the time so it never uses
13396 * it. This seems to suggest that the workaround is insufficient.
13397 */
13398 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13399 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13400 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13402 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
13403 * has the register indirect write enable bit set before
13404 * we try to access any of the MMIO registers. It is also
13405 * critical that the PCI-X hw workaround situation is decided
13406 * before that as well.
13407 */
13408 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13409 &misc_ctrl_reg);
13411 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13412 MISC_HOST_CTRL_CHIPREV_SHIFT);
13413 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13414 u32 prod_id_asic_rev;
13416 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13417 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13418 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13419 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13420 pci_read_config_dword(tp->pdev,
13421 TG3PCI_GEN2_PRODID_ASICREV,
13422 &prod_id_asic_rev);
13423 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13424 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13425 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13426 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13427 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13428 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13429 pci_read_config_dword(tp->pdev,
13430 TG3PCI_GEN15_PRODID_ASICREV,
13431 &prod_id_asic_rev);
13432 else
13433 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13434 &prod_id_asic_rev);
13436 tp->pci_chip_rev_id = prod_id_asic_rev;
13437 }
13439 /* Wrong chip ID in 5752 A0. This code can be removed later
13440 * as A0 is not in production.
13441 */
13442 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13443 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13445 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13446 * we need to disable memory and use config. cycles
13447 * only to access all registers. The 5702/03 chips
13448 * can mistakenly decode the special cycles from the
13449 * ICH chipsets as memory write cycles, causing corruption
13450 * of register and memory space. Only certain ICH bridges
13451 * will drive special cycles with non-zero data during the
13452 * address phase which can fall within the 5703's address
13453 * range. This is not an ICH bug as the PCI spec allows
13454 * non-zero address during special cycles. However, only
13455 * these ICH bridges are known to drive non-zero addresses
13456 * during special cycles.
13457 *
13458 * Since special cycles do not cross PCI bridges, we only
13459 * enable this workaround if the 5703 is on the secondary
13460 * bus of these ICH bridges.
13461 */
13462 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13463 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13464 static struct tg3_dev_id {
13465 u32 vendor;
13466 u32 device;
13467 u32 rev;
13468 } ich_chipsets[] = {
13469 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13470 PCI_ANY_ID },
13471 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13472 PCI_ANY_ID },
13473 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13474 0xa },
13475 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13476 PCI_ANY_ID },
13477 { },
13478 };
13479 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13480 struct pci_dev *bridge = NULL;
13482 while (pci_id->vendor != 0) {
13483 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13484 bridge);
13485 if (!bridge) {
13486 pci_id++;
13487 continue;
13488 }
13489 if (pci_id->rev != PCI_ANY_ID) {
13490 if (bridge->revision > pci_id->rev)
13491 continue;
13492 }
13493 if (bridge->subordinate &&
13494 (bridge->subordinate->number ==
13495 tp->pdev->bus->number)) {
13496 tg3_flag_set(tp, ICH_WORKAROUND);
13497 pci_dev_put(bridge);
13498 break;
13499 }
13500 }
13501 }
13503 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13504 static struct tg3_dev_id {
13505 u32 vendor;
13506 u32 device;
13507 } bridge_chipsets[] = {
13508 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13509 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13510 { },
13511 };
13512 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13513 struct pci_dev *bridge = NULL;
13515 while (pci_id->vendor != 0) {
13516 bridge = pci_get_device(pci_id->vendor,
13517 pci_id->device,
13518 bridge);
13519 if (!bridge) {
13520 pci_id++;
13521 continue;
13522 }
13523 if (bridge->subordinate &&
13524 (bridge->subordinate->number <=
13525 tp->pdev->bus->number) &&
13526 (bridge->subordinate->subordinate >=
13527 tp->pdev->bus->number)) {
13528 tg3_flag_set(tp, 5701_DMA_BUG);
13529 pci_dev_put(bridge);
13530 break;
13531 }
13532 }
13533 }
13535 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13536 * DMA addresses > 40-bit. This bridge may have other additional
13537 * 57xx devices behind it in some 4-port NIC designs for example.
13538 * Any tg3 device found behind the bridge will also need the 40-bit
13539 * DMA workaround.
13540 */
13541 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13542 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13543 tg3_flag_set(tp, 5780_CLASS);
13544 tg3_flag_set(tp, 40BIT_DMA_BUG);
13545 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13546 } else {
13547 struct pci_dev *bridge = NULL;
13549 do {
13550 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13551 PCI_DEVICE_ID_SERVERWORKS_EPB,
13552 bridge);
13553 if (bridge && bridge->subordinate &&
13554 (bridge->subordinate->number <=
13555 tp->pdev->bus->number) &&
13556 (bridge->subordinate->subordinate >=
13557 tp->pdev->bus->number)) {
13558 tg3_flag_set(tp, 40BIT_DMA_BUG);
13559 pci_dev_put(bridge);
13560 break;
13561 }
13562 } while (bridge);
13563 }
13565 /* Initialize misc host control in PCI block. */
13566 tp->misc_host_ctrl |= (misc_ctrl_reg &
13567 MISC_HOST_CTRL_CHIPREV);
13568 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13569 tp->misc_host_ctrl);
13571 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13572 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13573 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13574 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13575 tp->pdev_peer = tg3_find_peer(tp);
13577 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13578 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13579 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13580 tg3_flag_set(tp, 5717_PLUS);
13582 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13583 tg3_flag(tp, 5717_PLUS))
13584 tg3_flag_set(tp, 57765_PLUS);
13586 /* Intentionally exclude ASIC_REV_5906 */
13587 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13588 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13589 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13590 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13591 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13592 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13593 tg3_flag(tp, 57765_PLUS))
13594 tg3_flag_set(tp, 5755_PLUS);
13596 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13597 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13598 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13599 tg3_flag(tp, 5755_PLUS) ||
13600 tg3_flag(tp, 5780_CLASS))
13601 tg3_flag_set(tp, 5750_PLUS);
13603 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13604 tg3_flag(tp, 5750_PLUS))
13605 tg3_flag_set(tp, 5705_PLUS);
13607 /* Determine TSO capabilities */
13608 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13609 ; /* Do nothing. HW bug. */
13610 else if (tg3_flag(tp, 57765_PLUS))
13611 tg3_flag_set(tp, HW_TSO_3);
13612 else if (tg3_flag(tp, 5755_PLUS) ||
13613 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13614 tg3_flag_set(tp, HW_TSO_2);
13615 else if (tg3_flag(tp, 5750_PLUS)) {
13616 tg3_flag_set(tp, HW_TSO_1);
13617 tg3_flag_set(tp, TSO_BUG);
13618 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13619 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13620 tg3_flag_clear(tp, TSO_BUG);
13621 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13622 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13623 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13624 tg3_flag_set(tp, TSO_BUG);
13625 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13626 tp->fw_needed = FIRMWARE_TG3TSO5;
13627 else
13628 tp->fw_needed = FIRMWARE_TG3TSO;
13629 }
13631 /* Selectively allow TSO based on operating conditions */
13632 if (tg3_flag(tp, HW_TSO_1) ||
13633 tg3_flag(tp, HW_TSO_2) ||
13634 tg3_flag(tp, HW_TSO_3) ||
13635 (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13636 tg3_flag_set(tp, TSO_CAPABLE);
13637 else {
13638 tg3_flag_clear(tp, TSO_CAPABLE);
13639 tg3_flag_clear(tp, TSO_BUG);
13640 tp->fw_needed = NULL;
13641 }
13643 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13644 tp->fw_needed = FIRMWARE_TG3;
13646 tp->irq_max = 1;
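/* Default to a single interrupt vector; this is raised to
 * TG3_IRQ_MAX_VECS below for parts that support MSI-X.
 */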
13648 if (tg3_flag(tp, 5750_PLUS)) {
13649 tg3_flag_set(tp, SUPPORT_MSI);
13650 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13651 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13652 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13653 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13654 tp->pdev_peer == tp->pdev))
13655 tg3_flag_clear(tp, SUPPORT_MSI);
13657 if (tg3_flag(tp, 5755_PLUS) ||
13658 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13659 tg3_flag_set(tp, 1SHOT_MSI);
13660 }
13662 if (tg3_flag(tp, 57765_PLUS)) {
13663 tg3_flag_set(tp, SUPPORT_MSIX);
13664 tp->irq_max = TG3_IRQ_MAX_VECS;
13665 }
13666 }
13668 /* All chips can get confused if TX buffers
13669 * straddle the 4GB address boundary.
13670 */
13671 tg3_flag_set(tp, 4G_DMA_BNDRY_BUG);
13673 if (tg3_flag(tp, 5755_PLUS))
13674 tg3_flag_set(tp, SHORT_DMA_BUG);
13675 else
13676 tg3_flag_set(tp, 40BIT_DMA_LIMIT_BUG);
13678 if (tg3_flag(tp, 5717_PLUS))
13679 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13681 if (tg3_flag(tp, 57765_PLUS) &&
13682 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13683 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13685 if (!tg3_flag(tp, 5705_PLUS) ||
13686 tg3_flag(tp, 5780_CLASS) ||
13687 tg3_flag(tp, USE_JUMBO_BDFLAG))
13688 tg3_flag_set(tp, JUMBO_CAPABLE);
13690 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13691 &pci_state_reg);
13693 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13694 if (tp->pcie_cap != 0) {
13695 u16 lnkctl;
13697 tg3_flag_set(tp, PCI_EXPRESS);
13699 tp->pcie_readrq = 4096;
13700 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13701 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13702 tp->pcie_readrq = 2048;
13704 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13706 pci_read_config_word(tp->pdev,
13707 tp->pcie_cap + PCI_EXP_LNKCTL,
13708 &lnkctl);
13709 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13710 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13711 ASIC_REV_5906) {
13712 tg3_flag_clear(tp, HW_TSO_2);
13713 tg3_flag_clear(tp, TSO_CAPABLE);
13714 }
13715 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13716 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13717 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13718 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13719 tg3_flag_set(tp, CLKREQ_BUG);
13720 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13721 tg3_flag_set(tp, L1PLLPD_EN);
13722 }
13723 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13724 tg3_flag_set(tp, PCI_EXPRESS);
13725 } else if (!tg3_flag(tp, 5705_PLUS) ||
13726 tg3_flag(tp, 5780_CLASS)) {
13727 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13728 if (!tp->pcix_cap) {
13729 dev_err(&tp->pdev->dev,
13730 "Cannot find PCI-X capability, aborting\n");
13731 return -EIO;
13732 }
13734 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13735 tg3_flag_set(tp, PCIX_MODE);
13736 }
13738 /* If we have an AMD 762 or VIA K8T800 chipset, write
13739 * reordering to the mailbox registers done by the host
13740 * controller can cause major troubles. We read back from
13741 * every mailbox register write to force the writes to be
13742 * posted to the chip in order.
13743 */
13744 if (pci_dev_present(tg3_write_reorder_chipsets) &&
13745 !tg3_flag(tp, PCI_EXPRESS))
13746 tg3_flag_set(tp, MBOX_WRITE_REORDER);
13748 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13749 &tp->pci_cacheline_sz);
13750 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13751 &tp->pci_lat_timer);
13752 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13753 tp->pci_lat_timer < 64) {
13754 tp->pci_lat_timer = 64;
13755 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13756 tp->pci_lat_timer);
13757 }
13759 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13760 /* 5700 BX chips need to have their TX producer index
13761 * mailboxes written twice to work around a bug.
13762 */
13763 tg3_flag_set(tp, TXD_MBOX_HWBUG);
13765 /* If we are in PCI-X mode, enable register write workaround.
13766 *
13767 * The workaround is to use indirect register accesses
13768 * for all chip writes not to mailbox registers.
13769 */
13770 if (tg3_flag(tp, PCIX_MODE)) {
13771 u32 pm_reg;
13773 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13775 /* The chip can have its power management PCI config
13776 * space registers clobbered due to this bug.
13777 * So explicitly force the chip into D0 here.
13778 */
13779 pci_read_config_dword(tp->pdev,
13780 tp->pm_cap + PCI_PM_CTRL,
13781 &pm_reg);
13782 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13783 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13784 pci_write_config_dword(tp->pdev,
13785 tp->pm_cap + PCI_PM_CTRL,
13786 pm_reg);
13788 /* Also, force SERR#/PERR# in PCI command. */
13789 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13790 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13791 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13792 }
13793 }
13795 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13796 tg3_flag_set(tp, PCI_HIGH_SPEED);
13797 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13798 tg3_flag_set(tp, PCI_32BIT);
13800 /* Chip-specific fixup from Broadcom driver */
13801 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13802 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13803 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13804 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13807 /* Default fast path register access methods */
13808 tp->read32 = tg3_read32;
13809 tp->write32 = tg3_write32;
13810 tp->read32_mbox = tg3_read32;
13811 tp->write32_mbox = tg3_write32;
13812 tp->write32_tx_mbox = tg3_write32;
13813 tp->write32_rx_mbox = tg3_write32;
13815 /* Various workaround register access methods */
13816 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
13817 tp->write32 = tg3_write_indirect_reg32;
13818 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13819 (tg3_flag(tp, PCI_EXPRESS) &&
13820 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13821 /*
13822 * Back to back register writes can cause problems on these
13823 * chips, the workaround is to read back all reg writes
13824 * except those to mailbox regs.
13825 *
13826 * See tg3_write_indirect_reg32().
13827 */
13828 tp->write32 = tg3_write_flush_reg32;
13829 }
13831 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
13832 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13833 if (tg3_flag(tp, MBOX_WRITE_REORDER))
13834 tp->write32_rx_mbox = tg3_write_flush_reg32;
13835 }
13837 if (tg3_flag(tp, ICH_WORKAROUND)) {
13838 tp->read32 = tg3_read_indirect_reg32;
13839 tp->write32 = tg3_write_indirect_reg32;
13840 tp->read32_mbox = tg3_read_indirect_mbox;
13841 tp->write32_mbox = tg3_write_indirect_mbox;
13842 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13843 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13845 iounmap(tp->regs);
13846 tp->regs = NULL;
13848 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13849 pci_cmd &= ~PCI_COMMAND_MEMORY;
13850 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13851 }
13852 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13853 tp->read32_mbox = tg3_read32_mbox_5906;
13854 tp->write32_mbox = tg3_write32_mbox_5906;
13855 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13856 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13857 }
13859 if (tp->write32 == tg3_write_indirect_reg32 ||
13860 (tg3_flag(tp, PCIX_MODE) &&
13861 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13862 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13863 tg3_flag_set(tp, SRAM_USE_CONFIG);
13865 /* Get eeprom hw config before calling tg3_set_power_state().
13866 * In particular, the TG3_FLAG_IS_NIC flag must be
13867 * determined before calling tg3_set_power_state() so that
13868 * we know whether or not to switch out of Vaux power.
13869 * When the flag is set, it means that GPIO1 is used for eeprom
13870 * write protect and also implies that it is a LOM where GPIOs
13871 * are not used to switch power.
13872 */
13873 tg3_get_eeprom_hw_cfg(tp);
13875 if (tg3_flag(tp, ENABLE_APE)) {
13876 /* Allow reads and writes to the
13877 * APE register and memory space.
13878 */
13879 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13880 PCISTATE_ALLOW_APE_SHMEM_WR |
13881 PCISTATE_ALLOW_APE_PSPACE_WR;
13882 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13883 pci_state_reg);
13884 }
13886 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13887 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13888 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13889 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13890 tg3_flag(tp, 57765_PLUS))
13891 tg3_flag_set(tp, CPMU_PRESENT);
13893 /* Set up tp->grc_local_ctrl before calling tg3_power_up().
13894 * GPIO1 driven high will bring 5700's external PHY out of reset.
13895 * It is also used as eeprom write protect on LOMs.
13896 */
13897 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13898 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13899 tg3_flag(tp, EEPROM_WRITE_PROT))
13900 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13901 GRC_LCLCTRL_GPIO_OUTPUT1);
13902 /* Unused GPIO3 must be driven as output on 5752 because there
13903 * are no pull-up resistors on unused GPIO pins.
13904 */
13905 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13906 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13908 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13909 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13910 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13911 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13913 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
13914 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
13915 /* Turn off the debug UART. */
13916 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13917 if (tg3_flag(tp, IS_NIC))
13918 /* Keep VMain power. */
13919 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
13920 GRC_LCLCTRL_GPIO_OUTPUT0;
13921 }
13923 /* Force the chip into D0. */
13924 err = tg3_power_up(tp);
13925 if (err) {
13926 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
13927 return err;
13928 }
13930 /* Derive initial jumbo mode from MTU assigned in
13931 * ether_setup() via the alloc_etherdev() call
13932 */
13933 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
13934 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13936 /* Determine WakeOnLan speed to use. */
13937 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13938 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13939 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
13940 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
13941 tg3_flag_clear(tp, WOL_SPEED_100MB);
13942 } else {
13943 tg3_flag_set(tp, WOL_SPEED_100MB);
13944 }
13946 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13947 tp->phy_flags |= TG3_PHYFLG_IS_FET;
13949 /* A few boards don't want Ethernet@WireSpeed phy feature */
13950 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13951 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13952 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
13953 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
13954 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
13955 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13956 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
13958 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
13959 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
13960 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
13961 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
13962 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
13964 if (tg3_flag(tp, 5705_PLUS) &&
13965 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
13966 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13967 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
13968 !tg3_flag(tp, 57765_PLUS)) {
13969 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13970 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13971 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13972 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
13973 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
13974 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
13975 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
13976 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
13977 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
13978 } else
13979 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
13980 }
13982 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13983 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
13984 tp->phy_otp = tg3_read_otp_phycfg(tp);
13985 if (tp->phy_otp == 0)
13986 tp->phy_otp = TG3_OTP_DEFAULT;
13987 }
13989 if (tg3_flag(tp, CPMU_PRESENT))
13990 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
13991 else
13992 tp->mi_mode = MAC_MI_MODE_BASE;
13994 tp->coalesce_mode = 0;
13995 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
13996 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
13997 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
13999 /* Set these bits to enable statistics workaround. */
14000 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14001 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14002 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14003 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14004 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14005 }
14007 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14008 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14009 tg3_flag_set(tp, USE_PHYLIB);
14011 err = tg3_mdio_init(tp);
14012 if (err)
14013 return err;
14015 /* Initialize data/descriptor byte/word swapping. */
14016 val = tr32(GRC_MODE);
14017 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14018 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14019 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14020 GRC_MODE_B2HRX_ENABLE |
14021 GRC_MODE_HTX2B_ENABLE |
14022 GRC_MODE_HOST_STACKUP);
14023 else
14024 val &= GRC_MODE_HOST_STACKUP;
14026 tw32(GRC_MODE, val | tp->grc_mode);
14028 tg3_switch_clocks(tp);
14030 /* Clear this out for sanity. */
14031 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14033 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14034 &pci_state_reg);
14035 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14036 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14037 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14039 if (chiprevid == CHIPREV_ID_5701_A0 ||
14040 chiprevid == CHIPREV_ID_5701_B0 ||
14041 chiprevid == CHIPREV_ID_5701_B2 ||
14042 chiprevid == CHIPREV_ID_5701_B5) {
14043 void __iomem *sram_base;
14045 /* Write some dummy words into the SRAM status block
14046 * area, see if it reads back correctly. If the return
14047 * value is bad, force enable the PCIX workaround.
14049 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14051 writel(0x00000000, sram_base);
14052 writel(0x00000000, sram_base + 4);
14053 writel(0xffffffff, sram_base + 4);
14054 if (readl(sram_base) != 0x00000000)
14055 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14059 udelay(50);
14060 tg3_nvram_init(tp);
14062 grc_misc_cfg = tr32(GRC_MISC_CFG);
14063 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14065 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14066 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14067 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14068 tg3_flag_set(tp, IS_5788);
14070 if (!tg3_flag(tp, IS_5788) &&
14071 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14072 tg3_flag_set(tp, TAGGED_STATUS);
14073 if (tg3_flag(tp, TAGGED_STATUS)) {
14074 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14075 HOSTCC_MODE_CLRTICK_TXBD);
14077 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14078 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14079 tp->misc_host_ctrl);
14082 /* Preserve the APE MAC_MODE bits */
14083 if (tg3_flag(tp, ENABLE_APE))
14084 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14085 else
14086 tp->mac_mode = TG3_DEF_MAC_MODE;
14088 /* these are limited to 10/100 only */
14089 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14090 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14091 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14092 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14093 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14094 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14095 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14096 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14097 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14098 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14099 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14100 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14101 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14102 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14103 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14104 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14106 err = tg3_phy_probe(tp);
14107 if (err) {
14108 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14109 /* ... but do not return immediately ... */
14110 tg3_mdio_fini(tp);
14113 tg3_read_vpd(tp);
14114 tg3_read_fw_ver(tp);
14116 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14117 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14118 } else {
14119 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14120 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14121 else
14122 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14125 /* 5700 {AX,BX} chips have a broken status block link
14126 * change bit implementation, so we must use the
14127 * status register in those cases.
14129 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14130 tg3_flag_set(tp, USE_LINKCHG_REG);
14131 else
14132 tg3_flag_clear(tp, USE_LINKCHG_REG);
14134 /* The led_ctrl is set during tg3_phy_probe, here we might
14135 * have to force the link status polling mechanism based
14136 * upon subsystem IDs.
14138 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14139 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14140 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14141 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14142 tg3_flag_set(tp, USE_LINKCHG_REG);
14145 /* For all SERDES we poll the MAC status register. */
14146 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14147 tg3_flag_set(tp, POLL_SERDES);
14148 else
14149 tg3_flag_clear(tp, POLL_SERDES);
14151 tp->rx_offset = NET_IP_ALIGN;
14152 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14153 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14154 tg3_flag(tp, PCIX_MODE)) {
14155 tp->rx_offset = 0;
14156 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14157 tp->rx_copy_thresh = ~(u16)0;
14158 #endif
14161 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14162 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14163 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14165 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14167 /* Increment the rx prod index on the rx std ring by at most
14168 * 8 for these chips to workaround hw errata.
14170 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14171 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14172 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14173 tp->rx_std_max_post = 8;
14175 if (tg3_flag(tp, ASPM_WORKAROUND))
14176 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14177 PCIE_PWR_MGMT_L1_THRESH_MSK;
14179 return err;
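/* On SPARC platforms the MAC address is normally published by the
 * firmware device tree ("local-mac-address" property) or, failing
 * that, by the system IDPROM; the two helpers below try those
 * sources before the generic mailbox/NVRAM/register paths run.
 */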
#ifdef CONFIG_SPARC
static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == 6) {
		memcpy(dev->dev_addr, addr, 6);
		memcpy(dev->perm_addr, dev->dev_addr, 6);
		return 0;
	}
	return -ENODEV;
}

static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
	return 0;
}
#endif

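/* MAC address discovery order used below: SPARC firmware first, then
 * the bootcode mailbox in NIC SRAM (its high word carries the 0x484b
 * signature, ASCII "HK"), then NVRAM at a chip-dependent offset, and
 * finally the MAC_ADDR_0 registers themselves.
 */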
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (PCI_FUNC(tp->pdev->devfn) & 1)
			mac_offset = 0xcc;
		if (PCI_FUNC(tp->pdev->devfn) > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}

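/* tg3_calc_dma_bndry() below folds the host cache-line size and a
 * per-architecture "goal" (single vs. multiple cache lines) into the
 * read/write boundary fields of DMA_RWCTRL.  For instance, with a
 * 64-byte cache line and BOUNDARY_SINGLE_CACHELINE on plain PCI, the
 * final switch selects DMA_RWCTRL_READ_BNDRY_64 |
 * DMA_RWCTRL_WRITE_BNDRY_64.
 */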
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}

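/* tg3_do_test_dma() drives one host<->NIC transfer by hand: it builds
 * a single internal buffer descriptor, pokes it into the NIC's SRAM
 * descriptor pool through the PCI memory window, kicks the read or
 * write DMA FTQ, and then polls the matching completion FIFO (up to
 * 40 * 100us) for the descriptor to come back.
 */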
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}

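/* The DMA self test: fill a coherent buffer with a known pattern, DMA
 * it to NIC SRAM and back at the largest write burst size, and shrink
 * the write boundary to 16 bytes if the data comes back corrupted.
 * This is what exposes the 5700/5701 write DMA bug mentioned below.
 */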
#define TEST_BUFFER_SIZE	0x2000

static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};

static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}

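/* Default buffer-manager watermarks; the 57765, 5705-plus and
 * original chip families each get their own standard and jumbo
 * thresholds, with a further override for the 5906.
 */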
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}

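/* Human-readable PHY name for the probe banner printed by
 * tg3_init_one(); unrecognized IDs fall through to "unknown".
 */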
static char * __devinit tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}

static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}

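/* The 5704 is a dual-port device: both MACs appear as separate PCI
 * functions in the same slot.  Find the other function, or fall back
 * to our own pci_dev when the board is configured single-port.
 */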
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}

static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}

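/* net_device_ops wiring; tg3_poll_controller is only provided when
 * CONFIG_NET_POLL_CONTROLLER is set (netconsole and friends).
 */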
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};

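/* Probe-time ordering matters here: the chip invariants (and the MDIO
 * bus) must be known before the DMA mask is chosen, the MAC address
 * must be valid before register_netdev(), and the DMA engine test only
 * runs after a possible UNDI/EFI leftover has been reset.
 */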
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	u32 features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_iounmap;
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}

		tg3_ape_lock_init(tp);

		if (tg3_flag(tp, ENABLE_ASF))
			tg3_read_dash_ver(tp);
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shut down.
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i < 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

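/* Teardown mirrors probe in reverse: release firmware, stop the reset
 * task, tear down phylib when it was in use, unregister the netdev
 * and unmap the BARs.
 */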
static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		if (tp->fw)
			release_firmware(tp->fw);

		cancel_work_sync(&tp->reset_task);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

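/* Suspend quiesces the NIC (PHY, NAPI, timer, interrupts) and then
 * prepares it for power-down; if that preparation fails, the device
 * is restarted so the system stays usable.  Resume reverses the
 * sequence via tg3_restart_hw().
 */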
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	flush_work_sync(&tp->reset_task);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}

static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);
	tg3_flag_clear(tp, RESTART_TIMER);

	/* Want to make sure that the reset task doesn't run */
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	tg3_flag_clear(tp, RESTART_TIMER);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err) {
		netdev_err(netdev, "Failed to restore register access.\n");
		goto done;
	}

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	tg3_full_unlock(tp);
	if (err) {
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}

static struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);