/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)	\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)	\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
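
/* Example: tg3_flag(tp, JUMBO_CAPABLE) expands to
 * _tg3_flag(TG3_FLAG_JUMBO_CAPABLE, (tp)->tg3_flags), i.e. a type-checked
 * test_bit() on the device's flag bitmap.
 */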
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			119
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 18, 2011"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
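
/* With TG3_TX_RING_SIZE fixed at a power of two (512), the mask above is
 * the '% foo' -> '& (foo - 1)' trick from the comment: NEXT_TX(511) ==
 * (512 & 511) == 0, wrapping the index without a hardware divide.
 */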
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
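/* i.e. the netif queue is restarted once at least a quarter of the
 * configured tx_pending descriptors have been reclaimed.
 */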
#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
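
/* ethtool exposes these strings in array order; the self-test result
 * array is expected to line up with them index-for-index.
 */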
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
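
/* Note: tw32() is a plain (possibly posted) register write, tw32_f()
 * goes through _tw32_flush() with a zero wait and so reads the register
 * back to flush the write, and tw32_wait_f() additionally delays 'us'
 * microseconds around the read-back.  All of these macros assume a
 * local variable named 'tp' in the calling scope.
 */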
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000
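
/* Each busy-wait iteration below delays 10 usec, so 5000 loops bounds a
 * single MI transaction at roughly 50 ms before -EBUSY is returned.
 */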
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}
static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB);
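
/* Both helpers return the tg3_phy_auxctl_write() status, i.e. 0 on
 * success, which is why callers below proceed only on
 * "!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)".
 */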
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
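	/* On loop exhaustion 'limit' has decremented past zero to -1; a
	 * break on BMCR_RESET clearing leaves it >= 0, so the sign of
	 * 'limit' distinguishes timeout from success.
	 */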
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;
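	/* delay_cnt is now a count of 8 usec polling slices (remaining
	 * time divided by 8, rounded up), so the loop below never waits
	 * much longer than the TG3_FW_EVENT_TIMEOUT_USEC budget.
	 */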
	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
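
/* Resolve RX/TX pause from the local and link-partner 1000BASE-X
 * advertisements (cf. the IEEE 802.3 pause resolution rules), as
 * implemented by the function below:
 *
 *	local PAUSE/ASYM	remote PAUSE/ASYM	result
 *	1 / x			1 / x			TX + RX
 *	1 / 1			0 / 1			RX only
 *	0 / 1			1 / 1			TX only
 */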
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
		if (err)
			return err;

		/* Block the PHY control access.  */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
2027 /* Fully reset the tigon3 PHY and apply any chip-specific
2028 * workarounds that must follow a reset. */
2030 static int tg3_phy_reset(struct tg3 *tp)
2032 u32 val, cpmuctrl;
2033 int err;
2035 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2036 val = tr32(GRC_MISC_CFG);
2037 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2038 udelay(40);
2040 err = tg3_readphy(tp, MII_BMSR, &val);
2041 err |= tg3_readphy(tp, MII_BMSR, &val);
2042 if (err != 0)
2043 return -EBUSY;
2045 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2046 netif_carrier_off(tp->dev);
2047 tg3_link_report(tp);
2050 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2051 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2052 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2053 err = tg3_phy_reset_5703_4_5(tp);
2054 if (err)
2055 return err;
2056 goto out;
2059 cpmuctrl = 0;
2060 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2061 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2062 cpmuctrl = tr32(TG3_CPMU_CTRL);
2063 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2064 tw32(TG3_CPMU_CTRL,
2065 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2068 err = tg3_bmcr_reset(tp);
2069 if (err)
2070 return err;
2072 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2073 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2074 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2076 tw32(TG3_CPMU_CTRL, cpmuctrl);
2079 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2080 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2081 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2082 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2083 CPMU_LSPD_1000MB_MACCLK_12_5) {
2084 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2085 udelay(40);
2086 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2090 if (tg3_flag(tp, 5717_PLUS) &&
2091 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2092 return 0;
2094 tg3_phy_apply_otp(tp);
2096 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2097 tg3_phy_toggle_apd(tp, true);
2098 else
2099 tg3_phy_toggle_apd(tp, false);
2101 out:
2102 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2103 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2104 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2105 tg3_phydsp_write(tp, 0x000a, 0x0323);
2106 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2109 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2110 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2111 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2114 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2115 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2116 tg3_phydsp_write(tp, 0x000a, 0x310b);
2117 tg3_phydsp_write(tp, 0x201f, 0x9506);
2118 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2119 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2121 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2122 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2123 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2124 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2125 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2126 tg3_writephy(tp, MII_TG3_TEST1,
2127 MII_TG3_TEST1_TRIM_EN | 0x4);
2128 } else
2129 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2131 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2135 /* Set the Extended packet length bit (bit 14) on all chips
2136 * that support jumbo frames. */
2137 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2138 /* Cannot do read-modify-write on 5401 */
2139 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2140 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2141 /* Set bit 14 with read-modify-write to preserve other bits */
2142 err = tg3_phy_auxctl_read(tp,
2143 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2144 if (!err)
2145 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2146 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2149 /* Set PHY register 0x10 bit 0 to high FIFO elasticity to support
2150 * jumbo frame transmission. */
2152 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2153 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2154 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2155 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2158 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2159 /* adjust output voltage */
2160 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2163 tg3_phy_toggle_automdix(tp, 1);
2164 tg3_phy_set_wirespeed(tp);
2165 return 0;
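/* Drive the GRC local-control GPIOs that switch the NIC between
 * Vmain and Vaux, accounting for the peer device on dual-port chips. */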
2168 static void tg3_frob_aux_power(struct tg3 *tp)
2170 bool need_vaux = false;
2172 /* The GPIOs do something completely different on 57765. */
2173 if (!tg3_flag(tp, IS_NIC) ||
2174 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2175 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2176 return;
2178 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2179 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2180 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2181 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
2182 tp->pdev_peer != tp->pdev) {
2183 struct net_device *dev_peer;
2185 dev_peer = pci_get_drvdata(tp->pdev_peer);
2187 /* remove_one() may have been run on the peer. */
2188 if (dev_peer) {
2189 struct tg3 *tp_peer = netdev_priv(dev_peer);
2191 if (tg3_flag(tp_peer, INIT_COMPLETE))
2192 return;
2194 if (tg3_flag(tp_peer, WOL_ENABLE) ||
2195 tg3_flag(tp_peer, ENABLE_ASF))
2196 need_vaux = true;
2200 if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
2201 need_vaux = true;
2203 if (need_vaux) {
2204 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2205 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2206 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2207 (GRC_LCLCTRL_GPIO_OE0 |
2208 GRC_LCLCTRL_GPIO_OE1 |
2209 GRC_LCLCTRL_GPIO_OE2 |
2210 GRC_LCLCTRL_GPIO_OUTPUT0 |
2211 GRC_LCLCTRL_GPIO_OUTPUT1),
2212 100);
2213 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2214 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2215 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2216 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2217 GRC_LCLCTRL_GPIO_OE1 |
2218 GRC_LCLCTRL_GPIO_OE2 |
2219 GRC_LCLCTRL_GPIO_OUTPUT0 |
2220 GRC_LCLCTRL_GPIO_OUTPUT1 |
2221 tp->grc_local_ctrl;
2222 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2224 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2225 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2227 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2228 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2229 } else {
2230 u32 no_gpio2;
2231 u32 grc_local_ctrl = 0;
2233 /* Workaround to prevent drawing too much current. */
2234 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2235 ASIC_REV_5714) {
2236 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2237 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2238 grc_local_ctrl, 100);
2241 /* On 5753 and variants, GPIO2 cannot be used. */
2242 no_gpio2 = tp->nic_sram_data_cfg &
2243 NIC_SRAM_DATA_CFG_NO_GPIO2;
2245 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2246 GRC_LCLCTRL_GPIO_OE1 |
2247 GRC_LCLCTRL_GPIO_OE2 |
2248 GRC_LCLCTRL_GPIO_OUTPUT1 |
2249 GRC_LCLCTRL_GPIO_OUTPUT2;
2250 if (no_gpio2) {
2251 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2252 GRC_LCLCTRL_GPIO_OUTPUT2);
2254 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2255 grc_local_ctrl, 100);
2257 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2259 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2260 grc_local_ctrl, 100);
2262 if (!no_gpio2) {
2263 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2264 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2265 grc_local_ctrl, 100);
2268 } else {
2269 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2270 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2271 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2272 (GRC_LCLCTRL_GPIO_OE1 |
2273 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2275 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2276 GRC_LCLCTRL_GPIO_OE1, 100);
2278 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2279 (GRC_LCLCTRL_GPIO_OE1 |
2280 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
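/* Return 1 if the 5700's MAC_MODE_LINK_POLARITY bit should be set
 * for the given link speed, based on the LED mode and PHY type. */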
2285 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2287 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2288 return 1;
2289 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2290 if (speed != SPEED_10)
2291 return 1;
2292 } else if (speed == SPEED_10)
2293 return 1;
2295 return 0;
2298 static int tg3_setup_phy(struct tg3 *, int);
2300 #define RESET_KIND_SHUTDOWN 0
2301 #define RESET_KIND_INIT 1
2302 #define RESET_KIND_SUSPEND 2
2304 static void tg3_write_sig_post_reset(struct tg3 *, int);
2305 static int tg3_halt_cpu(struct tg3 *, u32);
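/* Put the PHY into its lowest safe power state, honoring the
 * chip-specific errata that forbid a full power-down. */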
2307 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2309 u32 val;
2311 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2312 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2313 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2314 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2316 sg_dig_ctrl |=
2317 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2318 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2319 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2321 return;
2324 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2325 tg3_bmcr_reset(tp);
2326 val = tr32(GRC_MISC_CFG);
2327 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2328 udelay(40);
2329 return;
2330 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2331 u32 phytest;
2332 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2333 u32 phy;
2335 tg3_writephy(tp, MII_ADVERTISE, 0);
2336 tg3_writephy(tp, MII_BMCR,
2337 BMCR_ANENABLE | BMCR_ANRESTART);
2339 tg3_writephy(tp, MII_TG3_FET_TEST,
2340 phytest | MII_TG3_FET_SHADOW_EN);
2341 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2342 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2343 tg3_writephy(tp,
2344 MII_TG3_FET_SHDW_AUXMODE4,
2345 phy);
2347 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2349 return;
2350 } else if (do_low_power) {
2351 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2352 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2354 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2355 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2356 MII_TG3_AUXCTL_PCTL_VREG_11V;
2357 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2360 /* The PHY should not be powered down on some chips because
2361 * of bugs. */
2363 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2364 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2365 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2366 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2367 return;
2369 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2370 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2371 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2372 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2373 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2374 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2377 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2380 /* tp->lock is held. */
2381 static int tg3_nvram_lock(struct tg3 *tp)
2383 if (tg3_flag(tp, NVRAM)) {
2384 int i;
2386 if (tp->nvram_lock_cnt == 0) {
2387 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2388 for (i = 0; i < 8000; i++) {
2389 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2390 break;
2391 udelay(20);
2393 if (i == 8000) {
2394 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2395 return -ENODEV;
2398 tp->nvram_lock_cnt++;
2400 return 0;
2403 /* tp->lock is held. */
2404 static void tg3_nvram_unlock(struct tg3 *tp)
2406 if (tg3_flag(tp, NVRAM)) {
2407 if (tp->nvram_lock_cnt > 0)
2408 tp->nvram_lock_cnt--;
2409 if (tp->nvram_lock_cnt == 0)
2410 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2414 /* tp->lock is held. */
2415 static void tg3_enable_nvram_access(struct tg3 *tp)
2417 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2418 u32 nvaccess = tr32(NVRAM_ACCESS);
2420 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2424 /* tp->lock is held. */
2425 static void tg3_disable_nvram_access(struct tg3 *tp)
2427 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2428 u32 nvaccess = tr32(NVRAM_ACCESS);
2430 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
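/* Read one 32-bit word through the legacy EEPROM interface, used
 * when the chip has no NVRAM controller. */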
2434 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2435 u32 offset, u32 *val)
2437 u32 tmp;
2438 int i;
2440 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2441 return -EINVAL;
2443 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2444 EEPROM_ADDR_DEVID_MASK |
2445 EEPROM_ADDR_READ);
2446 tw32(GRC_EEPROM_ADDR,
2447 tmp |
2448 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2449 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2450 EEPROM_ADDR_ADDR_MASK) |
2451 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2453 for (i = 0; i < 1000; i++) {
2454 tmp = tr32(GRC_EEPROM_ADDR);
2456 if (tmp & EEPROM_ADDR_COMPLETE)
2457 break;
2458 msleep(1);
2460 if (!(tmp & EEPROM_ADDR_COMPLETE))
2461 return -EBUSY;
2463 tmp = tr32(GRC_EEPROM_DATA);
2466 /* The data will always be opposite the native endian
2467 * format. Perform a blind byteswap to compensate. */
2469 *val = swab32(tmp);
2471 return 0;
2474 #define NVRAM_CMD_TIMEOUT 10000
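/* Issue a command to the NVRAM interface and poll up to
 * NVRAM_CMD_TIMEOUT times for completion. */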
2476 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2478 int i;
2480 tw32(NVRAM_CMD, nvram_cmd);
2481 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2482 udelay(10);
2483 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2484 udelay(10);
2485 break;
2489 if (i == NVRAM_CMD_TIMEOUT)
2490 return -EBUSY;
2492 return 0;
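/* Translate a linear NVRAM offset into the page:offset form used
 * by Atmel AT45DB0X1B parts, which address by page, not by byte. */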
2495 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2497 if (tg3_flag(tp, NVRAM) &&
2498 tg3_flag(tp, NVRAM_BUFFERED) &&
2499 tg3_flag(tp, FLASH) &&
2500 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2501 (tp->nvram_jedecnum == JEDEC_ATMEL))
2503 addr = ((addr / tp->nvram_pagesize) <<
2504 ATMEL_AT45DB0X1B_PAGE_POS) +
2505 (addr % tp->nvram_pagesize);
2507 return addr;
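/* The inverse of tg3_nvram_phys_addr(): map a page:offset address
 * back to a linear NVRAM offset. */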
2510 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2512 if (tg3_flag(tp, NVRAM) &&
2513 tg3_flag(tp, NVRAM_BUFFERED) &&
2514 tg3_flag(tp, FLASH) &&
2515 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2516 (tp->nvram_jedecnum == JEDEC_ATMEL))
2518 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2519 tp->nvram_pagesize) +
2520 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2522 return addr;
2525 /* NOTE: Data read in from NVRAM is byteswapped according to
2526 * the byteswapping settings for all other register accesses.
2527 * tg3 devices are BE devices, so on a BE machine, the data
2528 * returned will be exactly as it is seen in NVRAM. On a LE
2529 * machine, the 32-bit value will be byteswapped. */
2531 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2533 int ret;
2535 if (!tg3_flag(tp, NVRAM))
2536 return tg3_nvram_read_using_eeprom(tp, offset, val);
2538 offset = tg3_nvram_phys_addr(tp, offset);
2540 if (offset > NVRAM_ADDR_MSK)
2541 return -EINVAL;
2543 ret = tg3_nvram_lock(tp);
2544 if (ret)
2545 return ret;
2547 tg3_enable_nvram_access(tp);
2549 tw32(NVRAM_ADDR, offset);
2550 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2551 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2553 if (ret == 0)
2554 *val = tr32(NVRAM_RDDATA);
2556 tg3_disable_nvram_access(tp);
2558 tg3_nvram_unlock(tp);
2560 return ret;
2563 /* Ensures NVRAM data is in bytestream format. */
2564 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2566 u32 v;
2567 int res = tg3_nvram_read(tp, offset, &v);
2568 if (!res)
2569 *val = cpu_to_be32(v);
2570 return res;
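/* Program the MAC address into all four MAC address slots (plus the
 * extended slots on 5703/5704), then seed the TX backoff generator. */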
2573 /* tp->lock is held. */
2574 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2576 u32 addr_high, addr_low;
2577 int i;
2579 addr_high = ((tp->dev->dev_addr[0] << 8) |
2580 tp->dev->dev_addr[1]);
2581 addr_low = ((tp->dev->dev_addr[2] << 24) |
2582 (tp->dev->dev_addr[3] << 16) |
2583 (tp->dev->dev_addr[4] << 8) |
2584 (tp->dev->dev_addr[5] << 0));
2585 for (i = 0; i < 4; i++) {
2586 if (i == 1 && skip_mac_1)
2587 continue;
2588 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2589 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2592 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2593 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2594 for (i = 0; i < 12; i++) {
2595 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2596 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2600 addr_high = (tp->dev->dev_addr[0] +
2601 tp->dev->dev_addr[1] +
2602 tp->dev->dev_addr[2] +
2603 tp->dev->dev_addr[3] +
2604 tp->dev->dev_addr[4] +
2605 tp->dev->dev_addr[5]) &
2606 TX_BACKOFF_SEED_MASK;
2607 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2610 static void tg3_enable_register_access(struct tg3 *tp)
2613 /* Make sure register accesses (indirect or otherwise) will function
2614 * correctly. */
2616 pci_write_config_dword(tp->pdev,
2617 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2620 static int tg3_power_up(struct tg3 *tp)
2622 tg3_enable_register_access(tp);
2624 pci_set_power_state(tp->pdev, PCI_D0);
2626 /* Switch out of Vaux if it is a NIC */
2627 if (tg3_flag(tp, IS_NIC))
2628 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2630 return 0;
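/* Quiesce the device ahead of a power-state change: save the link
 * config, arm WoL if requested, and power down the PHY and clocks. */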
2633 static int tg3_power_down_prepare(struct tg3 *tp)
2635 u32 misc_host_ctrl;
2636 bool device_should_wake, do_low_power;
2638 tg3_enable_register_access(tp);
2640 /* Restore the CLKREQ setting. */
2641 if (tg3_flag(tp, CLKREQ_BUG)) {
2642 u16 lnkctl;
2644 pci_read_config_word(tp->pdev,
2645 tp->pcie_cap + PCI_EXP_LNKCTL,
2646 &lnkctl);
2647 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2648 pci_write_config_word(tp->pdev,
2649 tp->pcie_cap + PCI_EXP_LNKCTL,
2650 lnkctl);
2653 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2654 tw32(TG3PCI_MISC_HOST_CTRL,
2655 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2657 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2658 tg3_flag(tp, WOL_ENABLE);
2660 if (tg3_flag(tp, USE_PHYLIB)) {
2661 do_low_power = false;
2662 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2663 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2664 struct phy_device *phydev;
2665 u32 phyid, advertising;
2667 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2669 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2671 tp->link_config.orig_speed = phydev->speed;
2672 tp->link_config.orig_duplex = phydev->duplex;
2673 tp->link_config.orig_autoneg = phydev->autoneg;
2674 tp->link_config.orig_advertising = phydev->advertising;
2676 advertising = ADVERTISED_TP |
2677 ADVERTISED_Pause |
2678 ADVERTISED_Autoneg |
2679 ADVERTISED_10baseT_Half;
2681 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2682 if (tg3_flag(tp, WOL_SPEED_100MB))
2683 advertising |=
2684 ADVERTISED_100baseT_Half |
2685 ADVERTISED_100baseT_Full |
2686 ADVERTISED_10baseT_Full;
2687 else
2688 advertising |= ADVERTISED_10baseT_Full;
2691 phydev->advertising = advertising;
2693 phy_start_aneg(phydev);
2695 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2696 if (phyid != PHY_ID_BCMAC131) {
2697 phyid &= PHY_BCM_OUI_MASK;
2698 if (phyid == PHY_BCM_OUI_1 ||
2699 phyid == PHY_BCM_OUI_2 ||
2700 phyid == PHY_BCM_OUI_3)
2701 do_low_power = true;
2704 } else {
2705 do_low_power = true;
2707 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2708 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2709 tp->link_config.orig_speed = tp->link_config.speed;
2710 tp->link_config.orig_duplex = tp->link_config.duplex;
2711 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2714 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2715 tp->link_config.speed = SPEED_10;
2716 tp->link_config.duplex = DUPLEX_HALF;
2717 tp->link_config.autoneg = AUTONEG_ENABLE;
2718 tg3_setup_phy(tp, 0);
2722 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2723 u32 val;
2725 val = tr32(GRC_VCPU_EXT_CTRL);
2726 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2727 } else if (!tg3_flag(tp, ENABLE_ASF)) {
2728 int i;
2729 u32 val;
2731 for (i = 0; i < 200; i++) {
2732 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2733 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2734 break;
2735 msleep(1);
2738 if (tg3_flag(tp, WOL_CAP))
2739 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2740 WOL_DRV_STATE_SHUTDOWN |
2741 WOL_DRV_WOL |
2742 WOL_SET_MAGIC_PKT);
2744 if (device_should_wake) {
2745 u32 mac_mode;
2747 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2748 if (do_low_power &&
2749 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2750 tg3_phy_auxctl_write(tp,
2751 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2752 MII_TG3_AUXCTL_PCTL_WOL_EN |
2753 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2754 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2755 udelay(40);
2758 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2759 mac_mode = MAC_MODE_PORT_MODE_GMII;
2760 else
2761 mac_mode = MAC_MODE_PORT_MODE_MII;
2763 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2764 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2765 ASIC_REV_5700) {
2766 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2767 SPEED_100 : SPEED_10;
2768 if (tg3_5700_link_polarity(tp, speed))
2769 mac_mode |= MAC_MODE_LINK_POLARITY;
2770 else
2771 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2773 } else {
2774 mac_mode = MAC_MODE_PORT_MODE_TBI;
2777 if (!tg3_flag(tp, 5750_PLUS))
2778 tw32(MAC_LED_CTRL, tp->led_ctrl);
2780 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2781 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2782 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2783 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2785 if (tg3_flag(tp, ENABLE_APE))
2786 mac_mode |= MAC_MODE_APE_TX_EN |
2787 MAC_MODE_APE_RX_EN |
2788 MAC_MODE_TDE_ENABLE;
2790 tw32_f(MAC_MODE, mac_mode);
2791 udelay(100);
2793 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2794 udelay(10);
2797 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2798 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2799 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2800 u32 base_val;
2802 base_val = tp->pci_clock_ctrl;
2803 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2804 CLOCK_CTRL_TXCLK_DISABLE);
2806 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2807 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2808 } else if (tg3_flag(tp, 5780_CLASS) ||
2809 tg3_flag(tp, CPMU_PRESENT) ||
2810 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2811 /* do nothing */
2812 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2813 u32 newbits1, newbits2;
2815 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2816 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2817 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2818 CLOCK_CTRL_TXCLK_DISABLE |
2819 CLOCK_CTRL_ALTCLK);
2820 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2821 } else if (tg3_flag(tp, 5705_PLUS)) {
2822 newbits1 = CLOCK_CTRL_625_CORE;
2823 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2824 } else {
2825 newbits1 = CLOCK_CTRL_ALTCLK;
2826 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2829 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2830 40);
2832 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2833 40);
2835 if (!tg3_flag(tp, 5705_PLUS)) {
2836 u32 newbits3;
2838 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2839 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2840 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2841 CLOCK_CTRL_TXCLK_DISABLE |
2842 CLOCK_CTRL_44MHZ_CORE);
2843 } else {
2844 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2847 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2848 tp->pci_clock_ctrl | newbits3, 40);
2852 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
2853 tg3_power_down_phy(tp, do_low_power);
2855 tg3_frob_aux_power(tp);
2857 /* Workaround for unstable PLL clock */
2858 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2859 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2860 u32 val = tr32(0x7d00);
2862 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2863 tw32(0x7d00, val);
2864 if (!tg3_flag(tp, ENABLE_ASF)) {
2865 int err;
2867 err = tg3_nvram_lock(tp);
2868 tg3_halt_cpu(tp, RX_CPU_BASE);
2869 if (!err)
2870 tg3_nvram_unlock(tp);
2874 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2876 return 0;
2879 static void tg3_power_down(struct tg3 *tp)
2881 tg3_power_down_prepare(tp);
2883 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
2884 pci_set_power_state(tp->pdev, PCI_D3hot);
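/* Decode the PHY auxiliary status register into speed and duplex. */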
2887 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2889 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2890 case MII_TG3_AUX_STAT_10HALF:
2891 *speed = SPEED_10;
2892 *duplex = DUPLEX_HALF;
2893 break;
2895 case MII_TG3_AUX_STAT_10FULL:
2896 *speed = SPEED_10;
2897 *duplex = DUPLEX_FULL;
2898 break;
2900 case MII_TG3_AUX_STAT_100HALF:
2901 *speed = SPEED_100;
2902 *duplex = DUPLEX_HALF;
2903 break;
2905 case MII_TG3_AUX_STAT_100FULL:
2906 *speed = SPEED_100;
2907 *duplex = DUPLEX_FULL;
2908 break;
2910 case MII_TG3_AUX_STAT_1000HALF:
2911 *speed = SPEED_1000;
2912 *duplex = DUPLEX_HALF;
2913 break;
2915 case MII_TG3_AUX_STAT_1000FULL:
2916 *speed = SPEED_1000;
2917 *duplex = DUPLEX_FULL;
2918 break;
2920 default:
2921 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2922 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2923 SPEED_10;
2924 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2925 DUPLEX_HALF;
2926 break;
2928 *speed = SPEED_INVALID;
2929 *duplex = DUPLEX_INVALID;
2930 break;
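/* Program the advertisement registers (10/100, 1000BASE-T and EEE)
 * from the requested advertise mask and flow-control setting. */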
2934 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
2936 int err = 0;
2937 u32 val, new_adv;
2939 new_adv = ADVERTISE_CSMA;
2940 if (advertise & ADVERTISED_10baseT_Half)
2941 new_adv |= ADVERTISE_10HALF;
2942 if (advertise & ADVERTISED_10baseT_Full)
2943 new_adv |= ADVERTISE_10FULL;
2944 if (advertise & ADVERTISED_100baseT_Half)
2945 new_adv |= ADVERTISE_100HALF;
2946 if (advertise & ADVERTISED_100baseT_Full)
2947 new_adv |= ADVERTISE_100FULL;
2949 new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
2951 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
2952 if (err)
2953 goto done;
2955 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2956 goto done;
2958 new_adv = 0;
2959 if (advertise & ADVERTISED_1000baseT_Half)
2960 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2961 if (advertise & ADVERTISED_1000baseT_Full)
2962 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2964 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2965 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2966 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2967 MII_TG3_CTRL_ENABLE_AS_MASTER);
2969 err = tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2970 if (err)
2971 goto done;
2973 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2974 goto done;
2976 tw32(TG3_CPMU_EEE_MODE,
2977 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2979 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2980 if (!err) {
2981 u32 err2;
2983 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
2984 case ASIC_REV_5717:
2985 case ASIC_REV_57765:
2986 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
2987 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
2988 MII_TG3_DSP_CH34TP2_HIBW01);
2989 /* Fall through */
2990 case ASIC_REV_5719:
2991 val = MII_TG3_DSP_TAP26_ALNOKO |
2992 MII_TG3_DSP_TAP26_RMRXSTO |
2993 MII_TG3_DSP_TAP26_OPCSINPT;
2994 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2997 val = 0;
2998 /* Advertise 100BASE-TX EEE ability */
2999 if (advertise & ADVERTISED_100baseT_Full)
3000 val |= MDIO_AN_EEE_ADV_100TX;
3001 /* Advertise 1000BASE-T EEE ability */
3002 if (advertise & ADVERTISED_1000baseT_Full)
3003 val |= MDIO_AN_EEE_ADV_1000T;
3004 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3006 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3007 if (!err)
3008 err = err2;
3011 done:
3012 return err;
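/* Begin copper link bring-up: advertise and autonegotiate, or force
 * the configured speed/duplex when autoneg is disabled. */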
3015 static void tg3_phy_copper_begin(struct tg3 *tp)
3017 u32 new_adv;
3018 int i;
3020 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3021 new_adv = ADVERTISED_10baseT_Half |
3022 ADVERTISED_10baseT_Full;
3023 if (tg3_flag(tp, WOL_SPEED_100MB))
3024 new_adv |= ADVERTISED_100baseT_Half |
3025 ADVERTISED_100baseT_Full;
3027 tg3_phy_autoneg_cfg(tp, new_adv,
3028 FLOW_CTRL_TX | FLOW_CTRL_RX);
3029 } else if (tp->link_config.speed == SPEED_INVALID) {
3030 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3031 tp->link_config.advertising &=
3032 ~(ADVERTISED_1000baseT_Half |
3033 ADVERTISED_1000baseT_Full);
3035 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3036 tp->link_config.flowctrl);
3037 } else {
3038 /* Asking for a specific link mode. */
3039 if (tp->link_config.speed == SPEED_1000) {
3040 if (tp->link_config.duplex == DUPLEX_FULL)
3041 new_adv = ADVERTISED_1000baseT_Full;
3042 else
3043 new_adv = ADVERTISED_1000baseT_Half;
3044 } else if (tp->link_config.speed == SPEED_100) {
3045 if (tp->link_config.duplex == DUPLEX_FULL)
3046 new_adv = ADVERTISED_100baseT_Full;
3047 else
3048 new_adv = ADVERTISED_100baseT_Half;
3049 } else {
3050 if (tp->link_config.duplex == DUPLEX_FULL)
3051 new_adv = ADVERTISED_10baseT_Full;
3052 else
3053 new_adv = ADVERTISED_10baseT_Half;
3056 tg3_phy_autoneg_cfg(tp, new_adv,
3057 tp->link_config.flowctrl);
3060 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3061 tp->link_config.speed != SPEED_INVALID) {
3062 u32 bmcr, orig_bmcr;
3064 tp->link_config.active_speed = tp->link_config.speed;
3065 tp->link_config.active_duplex = tp->link_config.duplex;
3067 bmcr = 0;
3068 switch (tp->link_config.speed) {
3069 default:
3070 case SPEED_10:
3071 break;
3073 case SPEED_100:
3074 bmcr |= BMCR_SPEED100;
3075 break;
3077 case SPEED_1000:
3078 bmcr |= TG3_BMCR_SPEED1000;
3079 break;
3082 if (tp->link_config.duplex == DUPLEX_FULL)
3083 bmcr |= BMCR_FULLDPLX;
3085 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3086 (bmcr != orig_bmcr)) {
3087 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3088 for (i = 0; i < 1500; i++) {
3089 u32 tmp;
3091 udelay(10);
3092 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3093 tg3_readphy(tp, MII_BMSR, &tmp))
3094 continue;
3095 if (!(tmp & BMSR_LSTATUS)) {
3096 udelay(40);
3097 break;
3100 tg3_writephy(tp, MII_BMCR, bmcr);
3101 udelay(40);
3103 } else {
3104 tg3_writephy(tp, MII_BMCR,
3105 BMCR_ANENABLE | BMCR_ANRESTART);
3109 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3111 int err;
3113 /* Turn off tap power management. */
3114 /* Set Extended packet length bit */
3115 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3117 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3118 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3119 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3120 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3121 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3123 udelay(40);
3125 return err;
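/* Return 1 if every mode in @mask is currently being advertised. */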
3128 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3130 u32 adv_reg, all_mask = 0;
3132 if (mask & ADVERTISED_10baseT_Half)
3133 all_mask |= ADVERTISE_10HALF;
3134 if (mask & ADVERTISED_10baseT_Full)
3135 all_mask |= ADVERTISE_10FULL;
3136 if (mask & ADVERTISED_100baseT_Half)
3137 all_mask |= ADVERTISE_100HALF;
3138 if (mask & ADVERTISED_100baseT_Full)
3139 all_mask |= ADVERTISE_100FULL;
3141 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3142 return 0;
3144 if ((adv_reg & all_mask) != all_mask)
3145 return 0;
3146 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3147 u32 tg3_ctrl;
3149 all_mask = 0;
3150 if (mask & ADVERTISED_1000baseT_Half)
3151 all_mask |= ADVERTISE_1000HALF;
3152 if (mask & ADVERTISED_1000baseT_Full)
3153 all_mask |= ADVERTISE_1000FULL;
3155 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3156 return 0;
3158 if ((tg3_ctrl & all_mask) != all_mask)
3159 return 0;
3161 return 1;
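/* Check that the advertised flow-control bits match the requested
 * configuration, rewriting the advertisement register if they don't. */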
3164 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3166 u32 curadv, reqadv;
3168 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3169 return 1;
3171 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3172 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3174 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3175 if (curadv != reqadv)
3176 return 0;
3178 if (tg3_flag(tp, PAUSE_AUTONEG))
3179 tg3_readphy(tp, MII_LPA, rmtadv);
3180 } else {
3181 /* Reprogram the advertisement register, even if it
3182 * does not affect the current link. If the link
3183 * gets renegotiated in the future, we can save an
3184 * additional renegotiation cycle by advertising
3185 * it correctly in the first place. */
3187 if (curadv != reqadv) {
3188 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3189 ADVERTISE_PAUSE_ASYM);
3190 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3194 return 1;
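/* Bring up (or re-verify) the link on a copper PHY and program the
 * MAC to match the negotiated speed, duplex and flow control. */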
3197 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3199 int current_link_up;
3200 u32 bmsr, val;
3201 u32 lcl_adv, rmt_adv;
3202 u16 current_speed;
3203 u8 current_duplex;
3204 int i, err;
3206 tw32(MAC_EVENT, 0);
3208 tw32_f(MAC_STATUS,
3209 (MAC_STATUS_SYNC_CHANGED |
3210 MAC_STATUS_CFG_CHANGED |
3211 MAC_STATUS_MI_COMPLETION |
3212 MAC_STATUS_LNKSTATE_CHANGED));
3213 udelay(40);
3215 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3216 tw32_f(MAC_MI_MODE,
3217 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3218 udelay(80);
3221 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3223 /* Some third-party PHYs need to be reset on link going
3224 * down. */
3226 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3227 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3228 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3229 netif_carrier_ok(tp->dev)) {
3230 tg3_readphy(tp, MII_BMSR, &bmsr);
3231 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3232 !(bmsr & BMSR_LSTATUS))
3233 force_reset = 1;
3235 if (force_reset)
3236 tg3_phy_reset(tp);
3238 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3239 tg3_readphy(tp, MII_BMSR, &bmsr);
3240 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3241 !tg3_flag(tp, INIT_COMPLETE))
3242 bmsr = 0;
3244 if (!(bmsr & BMSR_LSTATUS)) {
3245 err = tg3_init_5401phy_dsp(tp);
3246 if (err)
3247 return err;
3249 tg3_readphy(tp, MII_BMSR, &bmsr);
3250 for (i = 0; i < 1000; i++) {
3251 udelay(10);
3252 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3253 (bmsr & BMSR_LSTATUS)) {
3254 udelay(40);
3255 break;
3259 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3260 TG3_PHY_REV_BCM5401_B0 &&
3261 !(bmsr & BMSR_LSTATUS) &&
3262 tp->link_config.active_speed == SPEED_1000) {
3263 err = tg3_phy_reset(tp);
3264 if (!err)
3265 err = tg3_init_5401phy_dsp(tp);
3266 if (err)
3267 return err;
3270 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3271 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3272 /* 5701 {A0,B0} CRC bug workaround */
3273 tg3_writephy(tp, 0x15, 0x0a75);
3274 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3275 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3276 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3279 /* Clear pending interrupts... */
3280 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3281 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3283 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3284 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3285 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3286 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3288 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3289 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3290 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3291 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3292 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3293 else
3294 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3297 current_link_up = 0;
3298 current_speed = SPEED_INVALID;
3299 current_duplex = DUPLEX_INVALID;
3301 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3302 err = tg3_phy_auxctl_read(tp,
3303 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3304 &val);
3305 if (!err && !(val & (1 << 10))) {
3306 tg3_phy_auxctl_write(tp,
3307 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3308 val | (1 << 10));
3309 goto relink;
3313 bmsr = 0;
3314 for (i = 0; i < 100; i++) {
3315 tg3_readphy(tp, MII_BMSR, &bmsr);
3316 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3317 (bmsr & BMSR_LSTATUS))
3318 break;
3319 udelay(40);
3322 if (bmsr & BMSR_LSTATUS) {
3323 u32 aux_stat, bmcr;
3325 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3326 for (i = 0; i < 2000; i++) {
3327 udelay(10);
3328 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3329 aux_stat)
3330 break;
3333 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3334 &current_speed,
3335 &current_duplex);
3337 bmcr = 0;
3338 for (i = 0; i < 200; i++) {
3339 tg3_readphy(tp, MII_BMCR, &bmcr);
3340 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3341 continue;
3342 if (bmcr && bmcr != 0x7fff)
3343 break;
3344 udelay(10);
3347 lcl_adv = 0;
3348 rmt_adv = 0;
3350 tp->link_config.active_speed = current_speed;
3351 tp->link_config.active_duplex = current_duplex;
3353 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3354 if ((bmcr & BMCR_ANENABLE) &&
3355 tg3_copper_is_advertising_all(tp,
3356 tp->link_config.advertising)) {
3357 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3358 &rmt_adv))
3359 current_link_up = 1;
3361 } else {
3362 if (!(bmcr & BMCR_ANENABLE) &&
3363 tp->link_config.speed == current_speed &&
3364 tp->link_config.duplex == current_duplex &&
3365 tp->link_config.flowctrl ==
3366 tp->link_config.active_flowctrl) {
3367 current_link_up = 1;
3371 if (current_link_up == 1 &&
3372 tp->link_config.active_duplex == DUPLEX_FULL)
3373 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3376 relink:
3377 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3378 tg3_phy_copper_begin(tp);
3380 tg3_readphy(tp, MII_BMSR, &bmsr);
3381 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3382 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3383 current_link_up = 1;
3386 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3387 if (current_link_up == 1) {
3388 if (tp->link_config.active_speed == SPEED_100 ||
3389 tp->link_config.active_speed == SPEED_10)
3390 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3391 else
3392 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3393 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3394 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3395 else
3396 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3398 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3399 if (tp->link_config.active_duplex == DUPLEX_HALF)
3400 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3402 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3403 if (current_link_up == 1 &&
3404 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3405 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3406 else
3407 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3410 /* ??? Without this setting Netgear GA302T PHY does not
3411 * ??? send/receive packets... */
3413 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3414 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3415 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3416 tw32_f(MAC_MI_MODE, tp->mi_mode);
3417 udelay(80);
3420 tw32_f(MAC_MODE, tp->mac_mode);
3421 udelay(40);
3423 tg3_phy_eee_adjust(tp, current_link_up);
3425 if (tg3_flag(tp, USE_LINKCHG_REG)) {
3426 /* Polled via timer. */
3427 tw32_f(MAC_EVENT, 0);
3428 } else {
3429 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3431 udelay(40);
3433 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3434 current_link_up == 1 &&
3435 tp->link_config.active_speed == SPEED_1000 &&
3436 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3437 udelay(120);
3438 tw32_f(MAC_STATUS,
3439 (MAC_STATUS_SYNC_CHANGED |
3440 MAC_STATUS_CFG_CHANGED));
3441 udelay(40);
3442 tg3_write_mem(tp,
3443 NIC_SRAM_FIRMWARE_MBOX,
3444 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3447 /* Prevent send BD corruption. */
3448 if (tg3_flag(tp, CLKREQ_BUG)) {
3449 u16 oldlnkctl, newlnkctl;
3451 pci_read_config_word(tp->pdev,
3452 tp->pcie_cap + PCI_EXP_LNKCTL,
3453 &oldlnkctl);
3454 if (tp->link_config.active_speed == SPEED_100 ||
3455 tp->link_config.active_speed == SPEED_10)
3456 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3457 else
3458 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3459 if (newlnkctl != oldlnkctl)
3460 pci_write_config_word(tp->pdev,
3461 tp->pcie_cap + PCI_EXP_LNKCTL,
3462 newlnkctl);
3465 if (current_link_up != netif_carrier_ok(tp->dev)) {
3466 if (current_link_up)
3467 netif_carrier_on(tp->dev);
3468 else
3469 netif_carrier_off(tp->dev);
3470 tg3_link_report(tp);
3473 return 0;
3476 struct tg3_fiber_aneginfo {
3477 int state;
3478 #define ANEG_STATE_UNKNOWN 0
3479 #define ANEG_STATE_AN_ENABLE 1
3480 #define ANEG_STATE_RESTART_INIT 2
3481 #define ANEG_STATE_RESTART 3
3482 #define ANEG_STATE_DISABLE_LINK_OK 4
3483 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3484 #define ANEG_STATE_ABILITY_DETECT 6
3485 #define ANEG_STATE_ACK_DETECT_INIT 7
3486 #define ANEG_STATE_ACK_DETECT 8
3487 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3488 #define ANEG_STATE_COMPLETE_ACK 10
3489 #define ANEG_STATE_IDLE_DETECT_INIT 11
3490 #define ANEG_STATE_IDLE_DETECT 12
3491 #define ANEG_STATE_LINK_OK 13
3492 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3493 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3495 u32 flags;
3496 #define MR_AN_ENABLE 0x00000001
3497 #define MR_RESTART_AN 0x00000002
3498 #define MR_AN_COMPLETE 0x00000004
3499 #define MR_PAGE_RX 0x00000008
3500 #define MR_NP_LOADED 0x00000010
3501 #define MR_TOGGLE_TX 0x00000020
3502 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3503 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3504 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3505 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3506 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3507 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3508 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3509 #define MR_TOGGLE_RX 0x00002000
3510 #define MR_NP_RX 0x00004000
3512 #define MR_LINK_OK 0x80000000
3514 unsigned long link_time, cur_time;
3516 u32 ability_match_cfg;
3517 int ability_match_count;
3519 char ability_match, idle_match, ack_match;
3521 u32 txconfig, rxconfig;
3522 #define ANEG_CFG_NP 0x00000080
3523 #define ANEG_CFG_ACK 0x00000040
3524 #define ANEG_CFG_RF2 0x00000020
3525 #define ANEG_CFG_RF1 0x00000010
3526 #define ANEG_CFG_PS2 0x00000001
3527 #define ANEG_CFG_PS1 0x00008000
3528 #define ANEG_CFG_HD 0x00004000
3529 #define ANEG_CFG_FD 0x00002000
3530 #define ANEG_CFG_INVAL 0x00001f06
3533 #define ANEG_OK 0
3534 #define ANEG_DONE 1
3535 #define ANEG_TIMER_ENAB 2
3536 #define ANEG_FAILED -1
3538 #define ANEG_STATE_SETTLE_TIME 10000
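/* Software implementation of the 1000BASE-X autonegotiation state
 * machine, ticked from fiber_autoneg() below. */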
3540 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3541 struct tg3_fiber_aneginfo *ap)
3543 u16 flowctrl;
3544 unsigned long delta;
3545 u32 rx_cfg_reg;
3546 int ret;
3548 if (ap->state == ANEG_STATE_UNKNOWN) {
3549 ap->rxconfig = 0;
3550 ap->link_time = 0;
3551 ap->cur_time = 0;
3552 ap->ability_match_cfg = 0;
3553 ap->ability_match_count = 0;
3554 ap->ability_match = 0;
3555 ap->idle_match = 0;
3556 ap->ack_match = 0;
3558 ap->cur_time++;
3560 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3561 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3563 if (rx_cfg_reg != ap->ability_match_cfg) {
3564 ap->ability_match_cfg = rx_cfg_reg;
3565 ap->ability_match = 0;
3566 ap->ability_match_count = 0;
3567 } else {
3568 if (++ap->ability_match_count > 1) {
3569 ap->ability_match = 1;
3570 ap->ability_match_cfg = rx_cfg_reg;
3573 if (rx_cfg_reg & ANEG_CFG_ACK)
3574 ap->ack_match = 1;
3575 else
3576 ap->ack_match = 0;
3578 ap->idle_match = 0;
3579 } else {
3580 ap->idle_match = 1;
3581 ap->ability_match_cfg = 0;
3582 ap->ability_match_count = 0;
3583 ap->ability_match = 0;
3584 ap->ack_match = 0;
3586 rx_cfg_reg = 0;
3589 ap->rxconfig = rx_cfg_reg;
3590 ret = ANEG_OK;
3592 switch (ap->state) {
3593 case ANEG_STATE_UNKNOWN:
3594 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3595 ap->state = ANEG_STATE_AN_ENABLE;
3597 /* fallthru */
3598 case ANEG_STATE_AN_ENABLE:
3599 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3600 if (ap->flags & MR_AN_ENABLE) {
3601 ap->link_time = 0;
3602 ap->cur_time = 0;
3603 ap->ability_match_cfg = 0;
3604 ap->ability_match_count = 0;
3605 ap->ability_match = 0;
3606 ap->idle_match = 0;
3607 ap->ack_match = 0;
3609 ap->state = ANEG_STATE_RESTART_INIT;
3610 } else {
3611 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3613 break;
3615 case ANEG_STATE_RESTART_INIT:
3616 ap->link_time = ap->cur_time;
3617 ap->flags &= ~(MR_NP_LOADED);
3618 ap->txconfig = 0;
3619 tw32(MAC_TX_AUTO_NEG, 0);
3620 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3621 tw32_f(MAC_MODE, tp->mac_mode);
3622 udelay(40);
3624 ret = ANEG_TIMER_ENAB;
3625 ap->state = ANEG_STATE_RESTART;
3627 /* fallthru */
3628 case ANEG_STATE_RESTART:
3629 delta = ap->cur_time - ap->link_time;
3630 if (delta > ANEG_STATE_SETTLE_TIME)
3631 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3632 else
3633 ret = ANEG_TIMER_ENAB;
3634 break;
3636 case ANEG_STATE_DISABLE_LINK_OK:
3637 ret = ANEG_DONE;
3638 break;
3640 case ANEG_STATE_ABILITY_DETECT_INIT:
3641 ap->flags &= ~(MR_TOGGLE_TX);
3642 ap->txconfig = ANEG_CFG_FD;
3643 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3644 if (flowctrl & ADVERTISE_1000XPAUSE)
3645 ap->txconfig |= ANEG_CFG_PS1;
3646 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3647 ap->txconfig |= ANEG_CFG_PS2;
3648 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3649 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3650 tw32_f(MAC_MODE, tp->mac_mode);
3651 udelay(40);
3653 ap->state = ANEG_STATE_ABILITY_DETECT;
3654 break;
3656 case ANEG_STATE_ABILITY_DETECT:
3657 if (ap->ability_match != 0 && ap->rxconfig != 0)
3658 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3659 break;
3661 case ANEG_STATE_ACK_DETECT_INIT:
3662 ap->txconfig |= ANEG_CFG_ACK;
3663 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3664 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3665 tw32_f(MAC_MODE, tp->mac_mode);
3666 udelay(40);
3668 ap->state = ANEG_STATE_ACK_DETECT;
3670 /* fallthru */
3671 case ANEG_STATE_ACK_DETECT:
3672 if (ap->ack_match != 0) {
3673 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3674 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3675 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3676 } else {
3677 ap->state = ANEG_STATE_AN_ENABLE;
3679 } else if (ap->ability_match != 0 &&
3680 ap->rxconfig == 0) {
3681 ap->state = ANEG_STATE_AN_ENABLE;
3683 break;
3685 case ANEG_STATE_COMPLETE_ACK_INIT:
3686 if (ap->rxconfig & ANEG_CFG_INVAL) {
3687 ret = ANEG_FAILED;
3688 break;
3690 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3691 MR_LP_ADV_HALF_DUPLEX |
3692 MR_LP_ADV_SYM_PAUSE |
3693 MR_LP_ADV_ASYM_PAUSE |
3694 MR_LP_ADV_REMOTE_FAULT1 |
3695 MR_LP_ADV_REMOTE_FAULT2 |
3696 MR_LP_ADV_NEXT_PAGE |
3697 MR_TOGGLE_RX |
3698 MR_NP_RX);
3699 if (ap->rxconfig & ANEG_CFG_FD)
3700 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3701 if (ap->rxconfig & ANEG_CFG_HD)
3702 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3703 if (ap->rxconfig & ANEG_CFG_PS1)
3704 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3705 if (ap->rxconfig & ANEG_CFG_PS2)
3706 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3707 if (ap->rxconfig & ANEG_CFG_RF1)
3708 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3709 if (ap->rxconfig & ANEG_CFG_RF2)
3710 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3711 if (ap->rxconfig & ANEG_CFG_NP)
3712 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3714 ap->link_time = ap->cur_time;
3716 ap->flags ^= (MR_TOGGLE_TX);
3717 if (ap->rxconfig & 0x0008)
3718 ap->flags |= MR_TOGGLE_RX;
3719 if (ap->rxconfig & ANEG_CFG_NP)
3720 ap->flags |= MR_NP_RX;
3721 ap->flags |= MR_PAGE_RX;
3723 ap->state = ANEG_STATE_COMPLETE_ACK;
3724 ret = ANEG_TIMER_ENAB;
3725 break;
3727 case ANEG_STATE_COMPLETE_ACK:
3728 if (ap->ability_match != 0 &&
3729 ap->rxconfig == 0) {
3730 ap->state = ANEG_STATE_AN_ENABLE;
3731 break;
3733 delta = ap->cur_time - ap->link_time;
3734 if (delta > ANEG_STATE_SETTLE_TIME) {
3735 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3736 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3737 } else {
3738 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3739 !(ap->flags & MR_NP_RX)) {
3740 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3741 } else {
3742 ret = ANEG_FAILED;
3746 break;
3748 case ANEG_STATE_IDLE_DETECT_INIT:
3749 ap->link_time = ap->cur_time;
3750 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3751 tw32_f(MAC_MODE, tp->mac_mode);
3752 udelay(40);
3754 ap->state = ANEG_STATE_IDLE_DETECT;
3755 ret = ANEG_TIMER_ENAB;
3756 break;
3758 case ANEG_STATE_IDLE_DETECT:
3759 if (ap->ability_match != 0 &&
3760 ap->rxconfig == 0) {
3761 ap->state = ANEG_STATE_AN_ENABLE;
3762 break;
3764 delta = ap->cur_time - ap->link_time;
3765 if (delta > ANEG_STATE_SETTLE_TIME) {
3766 /* XXX another gem from the Broadcom driver :( */
3767 ap->state = ANEG_STATE_LINK_OK;
3769 break;
3771 case ANEG_STATE_LINK_OK:
3772 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3773 ret = ANEG_DONE;
3774 break;
3776 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3777 /* ??? unimplemented */
3778 break;
3780 case ANEG_STATE_NEXT_PAGE_WAIT:
3781 /* ??? unimplemented */
3782 break;
3784 default:
3785 ret = ANEG_FAILED;
3786 break;
3789 return ret;
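/* Run the software autoneg state machine to completion (or for up
 * to ~195 ms of polling) and return the resolved TX/RX config words. */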
3792 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3794 int res = 0;
3795 struct tg3_fiber_aneginfo aninfo;
3796 int status = ANEG_FAILED;
3797 unsigned int tick;
3798 u32 tmp;
3800 tw32_f(MAC_TX_AUTO_NEG, 0);
3802 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3803 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3804 udelay(40);
3806 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3807 udelay(40);
3809 memset(&aninfo, 0, sizeof(aninfo));
3810 aninfo.flags |= MR_AN_ENABLE;
3811 aninfo.state = ANEG_STATE_UNKNOWN;
3812 aninfo.cur_time = 0;
3813 tick = 0;
3814 while (++tick < 195000) {
3815 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3816 if (status == ANEG_DONE || status == ANEG_FAILED)
3817 break;
3819 udelay(1);
3822 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3823 tw32_f(MAC_MODE, tp->mac_mode);
3824 udelay(40);
3826 *txflags = aninfo.txconfig;
3827 *rxflags = aninfo.flags;
3829 if (status == ANEG_DONE &&
3830 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3831 MR_LP_ADV_FULL_DUPLEX)))
3832 res = 1;
3834 return res;
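/* One-time initialization sequence for the BCM8002 SerDes PHY. */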
3837 static void tg3_init_bcm8002(struct tg3 *tp)
3839 u32 mac_status = tr32(MAC_STATUS);
3840 int i;
3842 /* Reset when initializing for the first time, or when we have a link. */
3843 if (tg3_flag(tp, INIT_COMPLETE) &&
3844 !(mac_status & MAC_STATUS_PCS_SYNCED))
3845 return;
3847 /* Set PLL lock range. */
3848 tg3_writephy(tp, 0x16, 0x8007);
3850 /* SW reset */
3851 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3853 /* Wait for reset to complete. */
3854 /* XXX schedule_timeout() ... */
3855 for (i = 0; i < 500; i++)
3856 udelay(10);
3858 /* Config mode; select PMA/Ch 1 regs. */
3859 tg3_writephy(tp, 0x10, 0x8411);
3861 /* Enable auto-lock and comdet, select txclk for tx. */
3862 tg3_writephy(tp, 0x11, 0x0a10);
3864 tg3_writephy(tp, 0x18, 0x00a0);
3865 tg3_writephy(tp, 0x16, 0x41ff);
3867 /* Assert and deassert POR. */
3868 tg3_writephy(tp, 0x13, 0x0400);
3869 udelay(40);
3870 tg3_writephy(tp, 0x13, 0x0000);
3872 tg3_writephy(tp, 0x11, 0x0a50);
3873 udelay(40);
3874 tg3_writephy(tp, 0x11, 0x0a10);
3876 /* Wait for signal to stabilize */
3877 /* XXX schedule_timeout() ... */
3878 for (i = 0; i < 15000; i++)
3879 udelay(10);
3881 /* Deselect the channel register so we can read the PHYID
3882 * later. */
3884 tg3_writephy(tp, 0x10, 0x8011);
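/* Fiber link setup using the hardware SG_DIG autoneg engine, with a
 * fallback to parallel detection when no config words are received. */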
3887 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3889 u16 flowctrl;
3890 u32 sg_dig_ctrl, sg_dig_status;
3891 u32 serdes_cfg, expected_sg_dig_ctrl;
3892 int workaround, port_a;
3893 int current_link_up;
3895 serdes_cfg = 0;
3896 expected_sg_dig_ctrl = 0;
3897 workaround = 0;
3898 port_a = 1;
3899 current_link_up = 0;
3901 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3902 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3903 workaround = 1;
3904 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3905 port_a = 0;
3907 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3908 /* preserve bits 20-23 for voltage regulator */
3909 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3912 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3914 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3915 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3916 if (workaround) {
3917 u32 val = serdes_cfg;
3919 if (port_a)
3920 val |= 0xc010000;
3921 else
3922 val |= 0x4010000;
3923 tw32_f(MAC_SERDES_CFG, val);
3926 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3928 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3929 tg3_setup_flow_control(tp, 0, 0);
3930 current_link_up = 1;
3932 goto out;
3935 /* Want auto-negotiation. */
3936 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3938 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3939 if (flowctrl & ADVERTISE_1000XPAUSE)
3940 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3941 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3942 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3944 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3945 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3946 tp->serdes_counter &&
3947 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3948 MAC_STATUS_RCVD_CFG)) ==
3949 MAC_STATUS_PCS_SYNCED)) {
3950 tp->serdes_counter--;
3951 current_link_up = 1;
3952 goto out;
3954 restart_autoneg:
3955 if (workaround)
3956 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3957 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3958 udelay(5);
3959 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3961 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3962 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3963 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3964 MAC_STATUS_SIGNAL_DET)) {
3965 sg_dig_status = tr32(SG_DIG_STATUS);
3966 mac_status = tr32(MAC_STATUS);
3968 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3969 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3970 u32 local_adv = 0, remote_adv = 0;
3972 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3973 local_adv |= ADVERTISE_1000XPAUSE;
3974 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3975 local_adv |= ADVERTISE_1000XPSE_ASYM;
3977 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3978 remote_adv |= LPA_1000XPAUSE;
3979 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3980 remote_adv |= LPA_1000XPAUSE_ASYM;
3982 tg3_setup_flow_control(tp, local_adv, remote_adv);
3983 current_link_up = 1;
3984 tp->serdes_counter = 0;
3985 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3986 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3987 if (tp->serdes_counter)
3988 tp->serdes_counter--;
3989 else {
3990 if (workaround) {
3991 u32 val = serdes_cfg;
3993 if (port_a)
3994 val |= 0xc010000;
3995 else
3996 val |= 0x4010000;
3998 tw32_f(MAC_SERDES_CFG, val);
4001 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4002 udelay(40);
4004 /* Link parallel detection - link is up only if we have
4005 * PCS_SYNC and are not receiving config code words. */
4007 mac_status = tr32(MAC_STATUS);
4008 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4009 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4010 tg3_setup_flow_control(tp, 0, 0);
4011 current_link_up = 1;
4012 tp->phy_flags |=
4013 TG3_PHYFLG_PARALLEL_DETECT;
4014 tp->serdes_counter =
4015 SERDES_PARALLEL_DET_TIMEOUT;
4016 } else
4017 goto restart_autoneg;
4020 } else {
4021 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4022 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4025 out:
4026 return current_link_up;
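/* Fiber link setup driven by the software autoneg state machine. */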
4029 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4031 int current_link_up = 0;
4033 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4034 goto out;
4036 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4037 u32 txflags, rxflags;
4038 int i;
4040 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4041 u32 local_adv = 0, remote_adv = 0;
4043 if (txflags & ANEG_CFG_PS1)
4044 local_adv |= ADVERTISE_1000XPAUSE;
4045 if (txflags & ANEG_CFG_PS2)
4046 local_adv |= ADVERTISE_1000XPSE_ASYM;
4048 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4049 remote_adv |= LPA_1000XPAUSE;
4050 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4051 remote_adv |= LPA_1000XPAUSE_ASYM;
4053 tg3_setup_flow_control(tp, local_adv, remote_adv);
4055 current_link_up = 1;
4057 for (i = 0; i < 30; i++) {
4058 udelay(20);
4059 tw32_f(MAC_STATUS,
4060 (MAC_STATUS_SYNC_CHANGED |
4061 MAC_STATUS_CFG_CHANGED));
4062 udelay(40);
4063 if ((tr32(MAC_STATUS) &
4064 (MAC_STATUS_SYNC_CHANGED |
4065 MAC_STATUS_CFG_CHANGED)) == 0)
4066 break;
4069 mac_status = tr32(MAC_STATUS);
4070 if (current_link_up == 0 &&
4071 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4072 !(mac_status & MAC_STATUS_RCVD_CFG))
4073 current_link_up = 1;
4074 } else {
4075 tg3_setup_flow_control(tp, 0, 0);
4077 /* Forcing 1000FD link up. */
4078 current_link_up = 1;
4080 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4081 udelay(40);
4083 tw32_f(MAC_MODE, tp->mac_mode);
4084 udelay(40);
4087 out:
4088 return current_link_up;
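/* Top-level link setup for TBI (fiber) ports. */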
4091 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4093 u32 orig_pause_cfg;
4094 u16 orig_active_speed;
4095 u8 orig_active_duplex;
4096 u32 mac_status;
4097 int current_link_up;
4098 int i;
4100 orig_pause_cfg = tp->link_config.active_flowctrl;
4101 orig_active_speed = tp->link_config.active_speed;
4102 orig_active_duplex = tp->link_config.active_duplex;
4104 if (!tg3_flag(tp, HW_AUTONEG) &&
4105 netif_carrier_ok(tp->dev) &&
4106 tg3_flag(tp, INIT_COMPLETE)) {
4107 mac_status = tr32(MAC_STATUS);
4108 mac_status &= (MAC_STATUS_PCS_SYNCED |
4109 MAC_STATUS_SIGNAL_DET |
4110 MAC_STATUS_CFG_CHANGED |
4111 MAC_STATUS_RCVD_CFG);
4112 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4113 MAC_STATUS_SIGNAL_DET)) {
4114 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4115 MAC_STATUS_CFG_CHANGED));
4116 return 0;
4120 tw32_f(MAC_TX_AUTO_NEG, 0);
4122 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4123 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4124 tw32_f(MAC_MODE, tp->mac_mode);
4125 udelay(40);
4127 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4128 tg3_init_bcm8002(tp);
4130 /* Enable link change event even when serdes polling. */
4131 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4132 udelay(40);
4134 current_link_up = 0;
4135 mac_status = tr32(MAC_STATUS);
4137 if (tg3_flag(tp, HW_AUTONEG))
4138 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4139 else
4140 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4142 tp->napi[0].hw_status->status =
4143 (SD_STATUS_UPDATED |
4144 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4146 for (i = 0; i < 100; i++) {
4147 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4148 MAC_STATUS_CFG_CHANGED));
4149 udelay(5);
4150 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4151 MAC_STATUS_CFG_CHANGED |
4152 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4153 break;
4156 mac_status = tr32(MAC_STATUS);
4157 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4158 current_link_up = 0;
4159 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4160 tp->serdes_counter == 0) {
4161 tw32_f(MAC_MODE, (tp->mac_mode |
4162 MAC_MODE_SEND_CONFIGS));
4163 udelay(1);
4164 tw32_f(MAC_MODE, tp->mac_mode);
4168 if (current_link_up == 1) {
4169 tp->link_config.active_speed = SPEED_1000;
4170 tp->link_config.active_duplex = DUPLEX_FULL;
4171 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4172 LED_CTRL_LNKLED_OVERRIDE |
4173 LED_CTRL_1000MBPS_ON));
4174 } else {
4175 tp->link_config.active_speed = SPEED_INVALID;
4176 tp->link_config.active_duplex = DUPLEX_INVALID;
4177 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4178 LED_CTRL_LNKLED_OVERRIDE |
4179 LED_CTRL_TRAFFIC_OVERRIDE));
4182 if (current_link_up != netif_carrier_ok(tp->dev)) {
4183 if (current_link_up)
4184 netif_carrier_on(tp->dev);
4185 else
4186 netif_carrier_off(tp->dev);
4187 tg3_link_report(tp);
4188 } else {
4189 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4190 if (orig_pause_cfg != now_pause_cfg ||
4191 orig_active_speed != tp->link_config.active_speed ||
4192 orig_active_duplex != tp->link_config.active_duplex)
4193 tg3_link_report(tp);
4196 return 0;
4199 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4201 int current_link_up, err = 0;
4202 u32 bmsr, bmcr;
4203 u16 current_speed;
4204 u8 current_duplex;
4205 u32 local_adv, remote_adv;
4207 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4208 tw32_f(MAC_MODE, tp->mac_mode);
4209 udelay(40);
4211 tw32(MAC_EVENT, 0);
4213 tw32_f(MAC_STATUS,
4214 (MAC_STATUS_SYNC_CHANGED |
4215 MAC_STATUS_CFG_CHANGED |
4216 MAC_STATUS_MI_COMPLETION |
4217 MAC_STATUS_LNKSTATE_CHANGED));
4218 udelay(40);
4220 if (force_reset)
4221 tg3_phy_reset(tp);
4223 current_link_up = 0;
4224 current_speed = SPEED_INVALID;
4225 current_duplex = DUPLEX_INVALID;
4227 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4228 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4229 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4230 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4231 bmsr |= BMSR_LSTATUS;
4232 else
4233 bmsr &= ~BMSR_LSTATUS;
4236 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4238 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4239 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4240 /* do nothing, just check for link up at the end */
4241 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4242 u32 adv, new_adv;
4244 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4245 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4246 ADVERTISE_1000XPAUSE |
4247 ADVERTISE_1000XPSE_ASYM |
4248 ADVERTISE_SLCT);
4250 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4252 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4253 new_adv |= ADVERTISE_1000XHALF;
4254 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4255 new_adv |= ADVERTISE_1000XFULL;
4257 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4258 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4259 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4260 tg3_writephy(tp, MII_BMCR, bmcr);
4262 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4263 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4264 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4266 return err;
4268 } else {
4269 u32 new_bmcr;
4271 bmcr &= ~BMCR_SPEED1000;
4272 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4274 if (tp->link_config.duplex == DUPLEX_FULL)
4275 new_bmcr |= BMCR_FULLDPLX;
4277 if (new_bmcr != bmcr) {
4278 /* BMCR_SPEED1000 is a reserved bit that needs
4279 * to be set on write.
4281 new_bmcr |= BMCR_SPEED1000;
4283 /* Force a linkdown */
4284 if (netif_carrier_ok(tp->dev)) {
4285 u32 adv;
4287 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4288 adv &= ~(ADVERTISE_1000XFULL |
4289 ADVERTISE_1000XHALF |
4290 ADVERTISE_SLCT);
4291 tg3_writephy(tp, MII_ADVERTISE, adv);
4292 tg3_writephy(tp, MII_BMCR, bmcr |
4293 BMCR_ANRESTART |
4294 BMCR_ANENABLE);
4295 udelay(10);
4296 netif_carrier_off(tp->dev);
4298 tg3_writephy(tp, MII_BMCR, new_bmcr);
4299 bmcr = new_bmcr;
4300 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4301 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4302 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4303 ASIC_REV_5714) {
4304 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4305 bmsr |= BMSR_LSTATUS;
4306 else
4307 bmsr &= ~BMSR_LSTATUS;
4309 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4313 if (bmsr & BMSR_LSTATUS) {
4314 current_speed = SPEED_1000;
4315 current_link_up = 1;
4316 if (bmcr & BMCR_FULLDPLX)
4317 current_duplex = DUPLEX_FULL;
4318 else
4319 current_duplex = DUPLEX_HALF;
4321 local_adv = 0;
4322 remote_adv = 0;
4324 if (bmcr & BMCR_ANENABLE) {
4325 u32 common;
4327 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4328 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4329 common = local_adv & remote_adv;
4330 if (common & (ADVERTISE_1000XHALF |
4331 ADVERTISE_1000XFULL)) {
4332 if (common & ADVERTISE_1000XFULL)
4333 current_duplex = DUPLEX_FULL;
4334 else
4335 current_duplex = DUPLEX_HALF;
4336 } else if (!tg3_flag(tp, 5780_CLASS)) {
4337 /* Link is up via parallel detect */
4338 } else {
4339 current_link_up = 0;
4344 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4345 tg3_setup_flow_control(tp, local_adv, remote_adv);
4347 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4348 if (tp->link_config.active_duplex == DUPLEX_HALF)
4349 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4351 tw32_f(MAC_MODE, tp->mac_mode);
4352 udelay(40);
4354 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4356 tp->link_config.active_speed = current_speed;
4357 tp->link_config.active_duplex = current_duplex;
4359 if (current_link_up != netif_carrier_ok(tp->dev)) {
4360 if (current_link_up)
4361 netif_carrier_on(tp->dev);
4362 else {
4363 netif_carrier_off(tp->dev);
4364 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4366 tg3_link_report(tp);
4368 return err;
4371 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4373 if (tp->serdes_counter) {
4374 /* Give autoneg time to complete. */
4375 tp->serdes_counter--;
4376 return;
4379 if (!netif_carrier_ok(tp->dev) &&
4380 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4381 u32 bmcr;
4383 tg3_readphy(tp, MII_BMCR, &bmcr);
4384 if (bmcr & BMCR_ANENABLE) {
4385 u32 phy1, phy2;
4387 /* Select shadow register 0x1f */
4388 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4389 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4391 /* Select expansion interrupt status register */
4392 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4393 MII_TG3_DSP_EXP1_INT_STAT);
4394 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4395 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4397 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4398 /* We have signal detect and are not receiving
4399  * config code words; the link is up by parallel
4400  * detection. */
4403 bmcr &= ~BMCR_ANENABLE;
4404 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4405 tg3_writephy(tp, MII_BMCR, bmcr);
4406 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4409 } else if (netif_carrier_ok(tp->dev) &&
4410 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4411 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4412 u32 phy2;
4414 /* Select expansion interrupt status register */
4415 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4416 MII_TG3_DSP_EXP1_INT_STAT);
4417 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4418 if (phy2 & 0x20) {
4419 u32 bmcr;
4421 /* Config code words received, turn on autoneg. */
4422 tg3_readphy(tp, MII_BMCR, &bmcr);
4423 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4425 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4431 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4433 u32 val;
4434 int err;
4436 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4437 err = tg3_setup_fiber_phy(tp, force_reset);
4438 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4439 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4440 else
4441 err = tg3_setup_copper_phy(tp, force_reset);
4443 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4444 u32 scale;
4446 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4447 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4448 scale = 65;
4449 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4450 scale = 6;
4451 else
4452 scale = 12;
4454 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4455 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4456 tw32(GRC_MISC_CFG, val);
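/* The prescaler value appears to track the MAC clock in MHz (62.5,
 * 6.25 and 12.5 MHz map to 65, 6 and 12 respectively), presumably so
 * GRC timer ticks stay roughly constant when the CPMU switches the
 * MAC clock on 5784 A-step parts.  This reading is inferred from the
 * constants above, not from documentation.
 */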
4459 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4460 (6 << TX_LENGTHS_IPG_SHIFT);
4461 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4462 val |= tr32(MAC_TX_LENGTHS) &
4463 (TX_LENGTHS_JMB_FRM_LEN_MSK |
4464 TX_LENGTHS_CNT_DWN_VAL_MSK);
4466 if (tp->link_config.active_speed == SPEED_1000 &&
4467 tp->link_config.active_duplex == DUPLEX_HALF)
4468 tw32(MAC_TX_LENGTHS, val |
4469 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4470 else
4471 tw32(MAC_TX_LENGTHS, val |
4472 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4474 if (!tg3_flag(tp, 5705_PLUS)) {
4475 if (netif_carrier_ok(tp->dev)) {
4476 tw32(HOSTCC_STAT_COAL_TICKS,
4477 tp->coal.stats_block_coalesce_usecs);
4478 } else {
4479 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4483 if (tg3_flag(tp, ASPM_WORKAROUND)) {
4484 val = tr32(PCIE_PWR_MGMT_THRESH);
4485 if (!netif_carrier_ok(tp->dev))
4486 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4487 tp->pwrmgmt_thresh;
4488 else
4489 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4490 tw32(PCIE_PWR_MGMT_THRESH, val);
4493 return err;
4496 static inline int tg3_irq_sync(struct tg3 *tp)
4498 return tp->irq_sync;
4501 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4503 int i;
4505 dst = (u32 *)((u8 *)dst + off);
4506 for (i = 0; i < len; i += sizeof(u32))
4507 *dst++ = tr32(off + i);
4510 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4512 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4513 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4514 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4515 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4516 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4517 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4518 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4519 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4520 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4521 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4522 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4523 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4524 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4525 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4526 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4527 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4528 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4529 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4530 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4532 if (tg3_flag(tp, SUPPORT_MSIX))
4533 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4535 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4536 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4537 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4538 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4539 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4540 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4541 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4542 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4544 if (!tg3_flag(tp, 5705_PLUS)) {
4545 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4546 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4547 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4550 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4551 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4552 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4553 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4554 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4556 if (tg3_flag(tp, NVRAM))
4557 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4560 static void tg3_dump_state(struct tg3 *tp)
4562 int i;
4563 u32 *regs;
4565 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4566 if (!regs) {
4567 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4568 return;
4571 if (tg3_flag(tp, PCI_EXPRESS)) {
4572 /* Read up to but not including private PCI registers */
4573 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4574 regs[i / sizeof(u32)] = tr32(i);
4575 } else
4576 tg3_dump_legacy_regs(tp, regs);
4578 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4579 if (!regs[i + 0] && !regs[i + 1] &&
4580 !regs[i + 2] && !regs[i + 3])
4581 continue;
4583 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4584 i * 4,
4585 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4588 kfree(regs);
4590 for (i = 0; i < tp->irq_cnt; i++) {
4591 struct tg3_napi *tnapi = &tp->napi[i];
4593 /* SW status block */
4594 netdev_err(tp->dev,
4595 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4597 tnapi->hw_status->status,
4598 tnapi->hw_status->status_tag,
4599 tnapi->hw_status->rx_jumbo_consumer,
4600 tnapi->hw_status->rx_consumer,
4601 tnapi->hw_status->rx_mini_consumer,
4602 tnapi->hw_status->idx[0].rx_producer,
4603 tnapi->hw_status->idx[0].tx_consumer);
4605 netdev_err(tp->dev,
4606 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4608 tnapi->last_tag, tnapi->last_irq_tag,
4609 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4610 tnapi->rx_rcb_ptr,
4611 tnapi->prodring.rx_std_prod_idx,
4612 tnapi->prodring.rx_std_cons_idx,
4613 tnapi->prodring.rx_jmb_prod_idx,
4614 tnapi->prodring.rx_jmb_cons_idx);
4618 /* This is called whenever we suspect that the system chipset is re-
4619 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4620 * is bogus tx completions. We try to recover by setting the
4621 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4622 * in the workqueue.
4624 static void tg3_tx_recover(struct tg3 *tp)
4626 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4627 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4629 netdev_warn(tp->dev,
4630 "The system may be re-ordering memory-mapped I/O "
4631 "cycles to the network device, attempting to recover. "
4632 "Please report the problem to the driver maintainer "
4633 "and include system chipset information.\n");
4635 spin_lock(&tp->lock);
4636 tg3_flag_set(tp, TX_RECOVERY_PENDING);
4637 spin_unlock(&tp->lock);
4640 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4642 /* Tell compiler to fetch tx indices from memory. */
4643 barrier();
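/* Worked example (illustrative, with the power-of-two ring size of
 * 512 from tg3.h): tx_prod = 5 and tx_cons = 510 give an in-flight
 * count of (5 - 510) & 511 = 7, i.e. entries 510, 511 and 0-4.  The
 * masked subtraction handles index wrap-around without a branch.
 */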
4644 return tnapi->tx_pending -
4645 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4648 /* Tigon3 never reports partial packet sends. So we do not
4649 * need special logic to handle SKBs that have not had all
4650 * of their frags sent yet, like SunGEM does.
4652 static void tg3_tx(struct tg3_napi *tnapi)
4654 struct tg3 *tp = tnapi->tp;
4655 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4656 u32 sw_idx = tnapi->tx_cons;
4657 struct netdev_queue *txq;
4658 int index = tnapi - tp->napi;
4660 if (tg3_flag(tp, ENABLE_TSS))
4661 index--;
4663 txq = netdev_get_tx_queue(tp->dev, index);
4665 while (sw_idx != hw_idx) {
4666 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4667 struct sk_buff *skb = ri->skb;
4668 int i, tx_bug = 0;
4670 if (unlikely(skb == NULL)) {
4671 tg3_tx_recover(tp);
4672 return;
4675 pci_unmap_single(tp->pdev,
4676 dma_unmap_addr(ri, mapping),
4677 skb_headlen(skb),
4678 PCI_DMA_TODEVICE);
4680 ri->skb = NULL;
4682 sw_idx = NEXT_TX(sw_idx);
4684 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4685 ri = &tnapi->tx_buffers[sw_idx];
4686 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4687 tx_bug = 1;
4689 pci_unmap_page(tp->pdev,
4690 dma_unmap_addr(ri, mapping),
4691 skb_shinfo(skb)->frags[i].size,
4692 PCI_DMA_TODEVICE);
4693 sw_idx = NEXT_TX(sw_idx);
4696 dev_kfree_skb(skb);
4698 if (unlikely(tx_bug)) {
4699 tg3_tx_recover(tp);
4700 return;
4704 tnapi->tx_cons = sw_idx;
4706 /* Need to make the tx_cons update visible to tg3_start_xmit()
4707 * before checking for netif_queue_stopped(). Without the
4708 * memory barrier, there is a small possibility that tg3_start_xmit()
4709 * will miss it and cause the queue to be stopped forever.
4711 smp_mb();
4713 if (unlikely(netif_tx_queue_stopped(txq) &&
4714 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4715 __netif_tx_lock(txq, smp_processor_id());
4716 if (netif_tx_queue_stopped(txq) &&
4717 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4718 netif_tx_wake_queue(txq);
4719 __netif_tx_unlock(txq);
4723 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4725 if (!ri->skb)
4726 return;
4728 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4729 map_sz, PCI_DMA_FROMDEVICE);
4730 dev_kfree_skb_any(ri->skb);
4731 ri->skb = NULL;
4734 /* Returns size of skb allocated or < 0 on error.
4736 * We only need to fill in the address because the other members
4737 * of the RX descriptor are invariant, see tg3_init_rings.
4739 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4740 * posting buffers we only dirty the first cache line of the RX
4741 * descriptor (containing the address). Whereas for the RX status
4742 * buffers the cpu only reads the last cacheline of the RX descriptor
4743 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4745 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4746 u32 opaque_key, u32 dest_idx_unmasked)
4748 struct tg3_rx_buffer_desc *desc;
4749 struct ring_info *map;
4750 struct sk_buff *skb;
4751 dma_addr_t mapping;
4752 int skb_size, dest_idx;
4754 switch (opaque_key) {
4755 case RXD_OPAQUE_RING_STD:
4756 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4757 desc = &tpr->rx_std[dest_idx];
4758 map = &tpr->rx_std_buffers[dest_idx];
4759 skb_size = tp->rx_pkt_map_sz;
4760 break;
4762 case RXD_OPAQUE_RING_JUMBO:
4763 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4764 desc = &tpr->rx_jmb[dest_idx].std;
4765 map = &tpr->rx_jmb_buffers[dest_idx];
4766 skb_size = TG3_RX_JMB_MAP_SZ;
4767 break;
4769 default:
4770 return -EINVAL;
4773 /* Do not overwrite any of the map or rp information
4774 * until we are sure we can commit to a new buffer.
4776 * Callers depend upon this behavior and assume that
4777 * we leave everything unchanged if we fail.
4779 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4780 if (skb == NULL)
4781 return -ENOMEM;
4783 skb_reserve(skb, tp->rx_offset);
4785 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4786 PCI_DMA_FROMDEVICE);
4787 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4788 dev_kfree_skb(skb);
4789 return -EIO;
4792 map->skb = skb;
4793 dma_unmap_addr_set(map, mapping, mapping);
4795 desc->addr_hi = ((u64)mapping >> 32);
4796 desc->addr_lo = ((u64)mapping & 0xffffffff);
4798 return skb_size;
4801 /* We only need to move over in the address because the other
4802 * members of the RX descriptor are invariant. See notes above
4803 * tg3_alloc_rx_skb for full details.
4805 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4806 struct tg3_rx_prodring_set *dpr,
4807 u32 opaque_key, int src_idx,
4808 u32 dest_idx_unmasked)
4810 struct tg3 *tp = tnapi->tp;
4811 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4812 struct ring_info *src_map, *dest_map;
4813 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4814 int dest_idx;
4816 switch (opaque_key) {
4817 case RXD_OPAQUE_RING_STD:
4818 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4819 dest_desc = &dpr->rx_std[dest_idx];
4820 dest_map = &dpr->rx_std_buffers[dest_idx];
4821 src_desc = &spr->rx_std[src_idx];
4822 src_map = &spr->rx_std_buffers[src_idx];
4823 break;
4825 case RXD_OPAQUE_RING_JUMBO:
4826 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4827 dest_desc = &dpr->rx_jmb[dest_idx].std;
4828 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4829 src_desc = &spr->rx_jmb[src_idx].std;
4830 src_map = &spr->rx_jmb_buffers[src_idx];
4831 break;
4833 default:
4834 return;
4837 dest_map->skb = src_map->skb;
4838 dma_unmap_addr_set(dest_map, mapping,
4839 dma_unmap_addr(src_map, mapping));
4840 dest_desc->addr_hi = src_desc->addr_hi;
4841 dest_desc->addr_lo = src_desc->addr_lo;
4843 /* Ensure that the update to the skb happens after the physical
4844 * addresses have been transferred to the new BD location.
4846 smp_wmb();
4848 src_map->skb = NULL;
4851 /* The RX ring scheme is composed of multiple rings which post fresh
4852 * buffers to the chip, and one special ring the chip uses to report
4853 * status back to the host.
4855 * The special ring reports the status of received packets to the
4856 * host. The chip does not write into the original descriptor the
4857 * RX buffer was obtained from. The chip simply takes the original
4858 * descriptor as provided by the host, updates the status and length
4859 * field, then writes this into the next status ring entry.
4861 * Each ring the host uses to post buffers to the chip is described
4862 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4863 * it is first placed into the on-chip ram. When the packet's length
4864 * is known, it walks down the TG3_BDINFO entries to select the ring.
4865 * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
4866 * whose MAXLEN covers the new packet's length is chosen.
4868 * The "separate ring for rx status" scheme may sound queer, but it makes
4869 * sense from a cache coherency perspective. If only the host writes
4870 * to the buffer post rings, and only the chip writes to the rx status
4871 * rings, then cache lines never move beyond shared-modified state.
4872 * If both the host and chip were to write into the same ring, cache line
4873 * eviction could occur since both entities want it in an exclusive state.
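/* The "opaque" cookie the chip echoes back in each status ring entry
 * encodes which producer ring a buffer came from and its index;
 * tg3_rx() below recovers both with:
 *
 *	desc_idx   = desc->opaque & RXD_OPAQUE_INDEX_MASK;
 *	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
 *
 * so the host can locate and recycle (or replace) the original buffer
 * without the chip ever writing into the producer rings.
 */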
4875 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4877 struct tg3 *tp = tnapi->tp;
4878 u32 work_mask, rx_std_posted = 0;
4879 u32 std_prod_idx, jmb_prod_idx;
4880 u32 sw_idx = tnapi->rx_rcb_ptr;
4881 u16 hw_idx;
4882 int received;
4883 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4885 hw_idx = *(tnapi->rx_rcb_prod_idx);
4887 /* We need to order the read of hw_idx and the read of
4888  * the opaque cookie. */
4890 rmb();
4891 work_mask = 0;
4892 received = 0;
4893 std_prod_idx = tpr->rx_std_prod_idx;
4894 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4895 while (sw_idx != hw_idx && budget > 0) {
4896 struct ring_info *ri;
4897 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4898 unsigned int len;
4899 struct sk_buff *skb;
4900 dma_addr_t dma_addr;
4901 u32 opaque_key, desc_idx, *post_ptr;
4903 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4904 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4905 if (opaque_key == RXD_OPAQUE_RING_STD) {
4906 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4907 dma_addr = dma_unmap_addr(ri, mapping);
4908 skb = ri->skb;
4909 post_ptr = &std_prod_idx;
4910 rx_std_posted++;
4911 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4912 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4913 dma_addr = dma_unmap_addr(ri, mapping);
4914 skb = ri->skb;
4915 post_ptr = &jmb_prod_idx;
4916 } else
4917 goto next_pkt_nopost;
4919 work_mask |= opaque_key;
4921 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4922 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4923 drop_it:
4924 tg3_recycle_rx(tnapi, tpr, opaque_key,
4925 desc_idx, *post_ptr);
4926 drop_it_no_recycle:
4927 /* Other statistics are kept track of by the card. */
4928 tp->rx_dropped++;
4929 goto next_pkt;
4932 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4933 ETH_FCS_LEN;
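/* Copy-break: frames longer than TG3_RX_COPY_THRESH() keep their DMA
 * buffer (a replacement is allocated for the ring below), while short
 * frames are copied into a small fresh skb so the large ring buffer
 * can simply be recycled in place.
 */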
4935 if (len > TG3_RX_COPY_THRESH(tp)) {
4936 int skb_size;
4938 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4939 *post_ptr);
4940 if (skb_size < 0)
4941 goto drop_it;
4943 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4944 PCI_DMA_FROMDEVICE);
4946 /* Ensure that the update to the skb happens
4947 * after the usage of the old DMA mapping.
4949 smp_wmb();
4951 ri->skb = NULL;
4953 skb_put(skb, len);
4954 } else {
4955 struct sk_buff *copy_skb;
4957 tg3_recycle_rx(tnapi, tpr, opaque_key,
4958 desc_idx, *post_ptr);
4960 copy_skb = netdev_alloc_skb(tp->dev, len +
4961 TG3_RAW_IP_ALIGN);
4962 if (copy_skb == NULL)
4963 goto drop_it_no_recycle;
4965 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4966 skb_put(copy_skb, len);
4967 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4968 skb_copy_from_linear_data(skb, copy_skb->data, len);
4969 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4971 /* We'll reuse the original ring buffer. */
4972 skb = copy_skb;
4975 if ((tp->dev->features & NETIF_F_RXCSUM) &&
4976 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4977 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4978 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4979 skb->ip_summed = CHECKSUM_UNNECESSARY;
4980 else
4981 skb_checksum_none_assert(skb);
4983 skb->protocol = eth_type_trans(skb, tp->dev);
4985 if (len > (tp->dev->mtu + ETH_HLEN) &&
4986 skb->protocol != htons(ETH_P_8021Q)) {
4987 dev_kfree_skb(skb);
4988 goto drop_it_no_recycle;
4991 if (desc->type_flags & RXD_FLAG_VLAN &&
4992 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
4993 __vlan_hwaccel_put_tag(skb,
4994 desc->err_vlan & RXD_VLAN_MASK);
4996 napi_gro_receive(&tnapi->napi, skb);
4998 received++;
4999 budget--;
5001 next_pkt:
5002 (*post_ptr)++;
5004 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5005 tpr->rx_std_prod_idx = std_prod_idx &
5006 tp->rx_std_ring_mask;
5007 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5008 tpr->rx_std_prod_idx);
5009 work_mask &= ~RXD_OPAQUE_RING_STD;
5010 rx_std_posted = 0;
5012 next_pkt_nopost:
5013 sw_idx++;
5014 sw_idx &= tp->rx_ret_ring_mask;
5016 /* Refresh hw_idx to see if there is new work */
5017 if (sw_idx == hw_idx) {
5018 hw_idx = *(tnapi->rx_rcb_prod_idx);
5019 rmb();
5023 /* ACK the status ring. */
5024 tnapi->rx_rcb_ptr = sw_idx;
5025 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5027 /* Refill RX ring(s). */
5028 if (!tg3_flag(tp, ENABLE_RSS)) {
5029 if (work_mask & RXD_OPAQUE_RING_STD) {
5030 tpr->rx_std_prod_idx = std_prod_idx &
5031 tp->rx_std_ring_mask;
5032 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5033 tpr->rx_std_prod_idx);
5035 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5036 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5037 tp->rx_jmb_ring_mask;
5038 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5039 tpr->rx_jmb_prod_idx);
5041 mmiowb();
5042 } else if (work_mask) {
5043 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5044 * updated before the producer indices can be updated.
5046 smp_wmb();
5048 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5049 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5051 if (tnapi != &tp->napi[1])
5052 napi_schedule(&tp->napi[1].napi);
5055 return received;
5058 static void tg3_poll_link(struct tg3 *tp)
5060 /* handle link change and other phy events */
5061 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5062 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5064 if (sblk->status & SD_STATUS_LINK_CHG) {
5065 sblk->status = SD_STATUS_UPDATED |
5066 (sblk->status & ~SD_STATUS_LINK_CHG);
5067 spin_lock(&tp->lock);
5068 if (tg3_flag(tp, USE_PHYLIB)) {
5069 tw32_f(MAC_STATUS,
5070 (MAC_STATUS_SYNC_CHANGED |
5071 MAC_STATUS_CFG_CHANGED |
5072 MAC_STATUS_MI_COMPLETION |
5073 MAC_STATUS_LNKSTATE_CHANGED));
5074 udelay(40);
5075 } else
5076 tg3_setup_phy(tp, 0);
5077 spin_unlock(&tp->lock);
5082 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5083 struct tg3_rx_prodring_set *dpr,
5084 struct tg3_rx_prodring_set *spr)
5086 u32 si, di, cpycnt, src_prod_idx;
5087 int i, err = 0;
5089 while (1) {
5090 src_prod_idx = spr->rx_std_prod_idx;
5092 /* Make sure updates to the rx_std_buffers[] entries and the
5093 * standard producer index are seen in the correct order.
5095 smp_rmb();
5097 if (spr->rx_std_cons_idx == src_prod_idx)
5098 break;
5100 if (spr->rx_std_cons_idx < src_prod_idx)
5101 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5102 else
5103 cpycnt = tp->rx_std_ring_mask + 1 -
5104 spr->rx_std_cons_idx;
5106 cpycnt = min(cpycnt,
5107 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
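/* Worked example (illustrative): with rx_std_ring_mask = 511,
 * cons_idx = 500 and prod_idx = 10, the consumer is numerically ahead
 * of the producer, so only the 512 - 500 = 12 entries up to the end
 * of the ring are copied this pass; the wrapped remainder is handled
 * by the next loop iteration.
 */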
5109 si = spr->rx_std_cons_idx;
5110 di = dpr->rx_std_prod_idx;
5112 for (i = di; i < di + cpycnt; i++) {
5113 if (dpr->rx_std_buffers[i].skb) {
5114 cpycnt = i - di;
5115 err = -ENOSPC;
5116 break;
5120 if (!cpycnt)
5121 break;
5123 /* Ensure that updates to the rx_std_buffers ring and the
5124 * shadowed hardware producer ring from tg3_recycle_skb() are
5125 * ordered correctly WRT the skb check above.
5127 smp_rmb();
5129 memcpy(&dpr->rx_std_buffers[di],
5130 &spr->rx_std_buffers[si],
5131 cpycnt * sizeof(struct ring_info));
5133 for (i = 0; i < cpycnt; i++, di++, si++) {
5134 struct tg3_rx_buffer_desc *sbd, *dbd;
5135 sbd = &spr->rx_std[si];
5136 dbd = &dpr->rx_std[di];
5137 dbd->addr_hi = sbd->addr_hi;
5138 dbd->addr_lo = sbd->addr_lo;
5141 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5142 tp->rx_std_ring_mask;
5143 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5144 tp->rx_std_ring_mask;
5147 while (1) {
5148 src_prod_idx = spr->rx_jmb_prod_idx;
5150 /* Make sure updates to the rx_jmb_buffers[] entries and
5151 * the jumbo producer index are seen in the correct order.
5153 smp_rmb();
5155 if (spr->rx_jmb_cons_idx == src_prod_idx)
5156 break;
5158 if (spr->rx_jmb_cons_idx < src_prod_idx)
5159 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5160 else
5161 cpycnt = tp->rx_jmb_ring_mask + 1 -
5162 spr->rx_jmb_cons_idx;
5164 cpycnt = min(cpycnt,
5165 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5167 si = spr->rx_jmb_cons_idx;
5168 di = dpr->rx_jmb_prod_idx;
5170 for (i = di; i < di + cpycnt; i++) {
5171 if (dpr->rx_jmb_buffers[i].skb) {
5172 cpycnt = i - di;
5173 err = -ENOSPC;
5174 break;
5178 if (!cpycnt)
5179 break;
5181 /* Ensure that updates to the rx_jmb_buffers ring and the
5182 * shadowed hardware producer ring from tg3_recycle_skb() are
5183 * ordered correctly WRT the skb check above.
5185 smp_rmb();
5187 memcpy(&dpr->rx_jmb_buffers[di],
5188 &spr->rx_jmb_buffers[si],
5189 cpycnt * sizeof(struct ring_info));
5191 for (i = 0; i < cpycnt; i++, di++, si++) {
5192 struct tg3_rx_buffer_desc *sbd, *dbd;
5193 sbd = &spr->rx_jmb[si].std;
5194 dbd = &dpr->rx_jmb[di].std;
5195 dbd->addr_hi = sbd->addr_hi;
5196 dbd->addr_lo = sbd->addr_lo;
5199 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5200 tp->rx_jmb_ring_mask;
5201 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5202 tp->rx_jmb_ring_mask;
5205 return err;
5208 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5210 struct tg3 *tp = tnapi->tp;
5212 /* run TX completion thread */
5213 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5214 tg3_tx(tnapi);
5215 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5216 return work_done;
5219 /* run RX thread, within the bounds set by NAPI.
5220 * All RX "locking" is done by ensuring outside
5221 * code synchronizes with tg3->napi.poll()
5223 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5224 work_done += tg3_rx(tnapi, budget - work_done);
5226 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5227 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5228 int i, err = 0;
5229 u32 std_prod_idx = dpr->rx_std_prod_idx;
5230 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
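/* Only tp->napi[1] runs this consolidation: the hardware exposes a
 * single pair of standard/jumbo producer mailboxes, tied to
 * tp->napi[0]'s prodring, so buffers recycled by the other RSS
 * vectors are gathered into ring 0 here and then posted back through
 * those mailboxes.
 */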
5232 for (i = 1; i < tp->irq_cnt; i++)
5233 err |= tg3_rx_prodring_xfer(tp, dpr,
5234 &tp->napi[i].prodring);
5236 wmb();
5238 if (std_prod_idx != dpr->rx_std_prod_idx)
5239 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5240 dpr->rx_std_prod_idx);
5242 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5243 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5244 dpr->rx_jmb_prod_idx);
5246 mmiowb();
5248 if (err)
5249 tw32_f(HOSTCC_MODE, tp->coal_now);
5252 return work_done;
5255 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5257 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5258 struct tg3 *tp = tnapi->tp;
5259 int work_done = 0;
5260 struct tg3_hw_status *sblk = tnapi->hw_status;
5262 while (1) {
5263 work_done = tg3_poll_work(tnapi, work_done, budget);
5265 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5266 goto tx_recovery;
5268 if (unlikely(work_done >= budget))
5269 break;
5271 /* tp->last_tag is used in tg3_int_reenable() below
5272 * to tell the hw how much work has been processed,
5273 * so we must read it before checking for more work.
5275 tnapi->last_tag = sblk->status_tag;
5276 tnapi->last_irq_tag = tnapi->last_tag;
5277 rmb();
5279 /* check for RX/TX work to do */
5280 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5281 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5282 napi_complete(napi);
5283 /* Reenable interrupts. */
5284 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5285 mmiowb();
5286 break;
5290 return work_done;
5292 tx_recovery:
5293 /* work_done is guaranteed to be less than budget. */
5294 napi_complete(napi);
5295 schedule_work(&tp->reset_task);
5296 return work_done;
5299 static void tg3_process_error(struct tg3 *tp)
5301 u32 val;
5302 bool real_error = false;
5304 if (tg3_flag(tp, ERROR_PROCESSED))
5305 return;
5307 /* Check Flow Attention register */
5308 val = tr32(HOSTCC_FLOW_ATTN);
5309 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5310 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
5311 real_error = true;
5314 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5315 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
5316 real_error = true;
5319 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5320 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
5321 real_error = true;
5324 if (!real_error)
5325 return;
5327 tg3_dump_state(tp);
5329 tg3_flag_set(tp, ERROR_PROCESSED);
5330 schedule_work(&tp->reset_task);
5333 static int tg3_poll(struct napi_struct *napi, int budget)
5335 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5336 struct tg3 *tp = tnapi->tp;
5337 int work_done = 0;
5338 struct tg3_hw_status *sblk = tnapi->hw_status;
5340 while (1) {
5341 if (sblk->status & SD_STATUS_ERROR)
5342 tg3_process_error(tp);
5344 tg3_poll_link(tp);
5346 work_done = tg3_poll_work(tnapi, work_done, budget);
5348 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5349 goto tx_recovery;
5351 if (unlikely(work_done >= budget))
5352 break;
5354 if (tg3_flag(tp, TAGGED_STATUS)) {
5355 /* tp->last_tag is used in tg3_int_reenable() below
5356 * to tell the hw how much work has been processed,
5357 * so we must read it before checking for more work.
5359 tnapi->last_tag = sblk->status_tag;
5360 tnapi->last_irq_tag = tnapi->last_tag;
5361 rmb();
5362 } else
5363 sblk->status &= ~SD_STATUS_UPDATED;
5365 if (likely(!tg3_has_work(tnapi))) {
5366 napi_complete(napi);
5367 tg3_int_reenable(tnapi);
5368 break;
5372 return work_done;
5374 tx_recovery:
5375 /* work_done is guaranteed to be less than budget. */
5376 napi_complete(napi);
5377 schedule_work(&tp->reset_task);
5378 return work_done;
5381 static void tg3_napi_disable(struct tg3 *tp)
5383 int i;
5385 for (i = tp->irq_cnt - 1; i >= 0; i--)
5386 napi_disable(&tp->napi[i].napi);
5389 static void tg3_napi_enable(struct tg3 *tp)
5391 int i;
5393 for (i = 0; i < tp->irq_cnt; i++)
5394 napi_enable(&tp->napi[i].napi);
5397 static void tg3_napi_init(struct tg3 *tp)
5399 int i;
5401 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5402 for (i = 1; i < tp->irq_cnt; i++)
5403 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5406 static void tg3_napi_fini(struct tg3 *tp)
5408 int i;
5410 for (i = 0; i < tp->irq_cnt; i++)
5411 netif_napi_del(&tp->napi[i].napi);
5414 static inline void tg3_netif_stop(struct tg3 *tp)
5416 tp->dev->trans_start = jiffies; /* prevent tx timeout */
5417 tg3_napi_disable(tp);
5418 netif_tx_disable(tp->dev);
5421 static inline void tg3_netif_start(struct tg3 *tp)
5423 /* NOTE: unconditional netif_tx_wake_all_queues is only
5424 * appropriate so long as all callers are assured to
5425 * have free tx slots (such as after tg3_init_hw)
5427 netif_tx_wake_all_queues(tp->dev);
5429 tg3_napi_enable(tp);
5430 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5431 tg3_enable_ints(tp);
5434 static void tg3_irq_quiesce(struct tg3 *tp)
5436 int i;
5438 BUG_ON(tp->irq_sync);
5440 tp->irq_sync = 1;
5441 smp_mb();
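/* The irq_sync store must be visible to the interrupt handlers, which
 * poll it via tg3_irq_sync(), before we wait below for in-flight
 * handlers to finish; otherwise a racing handler could still schedule
 * NAPI after the quiesce.
 */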
5443 for (i = 0; i < tp->irq_cnt; i++)
5444 synchronize_irq(tp->napi[i].irq_vec);
5447 /* Fully shut down all tg3 driver activity elsewhere in the system.
5448  * If irq_sync is non-zero, the IRQ handlers must be synchronized with
5449  * as well. Most of the time this is not necessary, except when
5450  * shutting down the device. */
5452 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5454 spin_lock_bh(&tp->lock);
5455 if (irq_sync)
5456 tg3_irq_quiesce(tp);
5459 static inline void tg3_full_unlock(struct tg3 *tp)
5461 spin_unlock_bh(&tp->lock);
5464 /* One-shot MSI handler - Chip automatically disables interrupt
5465 * after sending MSI so driver doesn't have to do it.
5467 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5469 struct tg3_napi *tnapi = dev_id;
5470 struct tg3 *tp = tnapi->tp;
5472 prefetch(tnapi->hw_status);
5473 if (tnapi->rx_rcb)
5474 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5476 if (likely(!tg3_irq_sync(tp)))
5477 napi_schedule(&tnapi->napi);
5479 return IRQ_HANDLED;
5482 /* MSI ISR - No need to check for interrupt sharing and no need to
5483 * flush status block and interrupt mailbox. PCI ordering rules
5484 * guarantee that MSI will arrive after the status block.
5486 static irqreturn_t tg3_msi(int irq, void *dev_id)
5488 struct tg3_napi *tnapi = dev_id;
5489 struct tg3 *tp = tnapi->tp;
5491 prefetch(tnapi->hw_status);
5492 if (tnapi->rx_rcb)
5493 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5495 /* Writing any value to intr-mbox-0 clears PCI INTA# and
5496  * chip-internal interrupt pending events.
5497  * Writing non-zero to intr-mbox-0 additionally tells the
5498  * NIC to stop sending us irqs, engaging "in-intr-handler"
5499  * event coalescing. */
5501 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5502 if (likely(!tg3_irq_sync(tp)))
5503 napi_schedule(&tnapi->napi);
5505 return IRQ_RETVAL(1);
5508 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5510 struct tg3_napi *tnapi = dev_id;
5511 struct tg3 *tp = tnapi->tp;
5512 struct tg3_hw_status *sblk = tnapi->hw_status;
5513 unsigned int handled = 1;
5515 /* In INTx mode, it is possible for the interrupt to arrive at
5516  * the CPU before the status block posted prior to the interrupt
5517  * has landed in memory. Reading the PCI State register will confirm
5518  * whether the interrupt is ours and will flush the status block. */
5520 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5521 if (tg3_flag(tp, CHIP_RESETTING) ||
5522 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5523 handled = 0;
5524 goto out;
5529 /* Writing any value to intr-mbox-0 clears PCI INTA# and
5530  * chip-internal interrupt pending events.
5531  * Writing non-zero to intr-mbox-0 additionally tells the
5532  * NIC to stop sending us irqs, engaging "in-intr-handler"
5533  * event coalescing.
5535  * Flush the mailbox to de-assert the IRQ immediately to prevent
5536  * spurious interrupts. The flush impacts performance but
5537  * excessive spurious interrupts can be worse in some cases. */
5539 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5540 if (tg3_irq_sync(tp))
5541 goto out;
5542 sblk->status &= ~SD_STATUS_UPDATED;
5543 if (likely(tg3_has_work(tnapi))) {
5544 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5545 napi_schedule(&tnapi->napi);
5546 } else {
5547 /* No work, shared interrupt perhaps?  Re-enable
5548  * interrupts, and flush that PCI write. */
5550 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5551 0x00000000);
5553 out:
5554 return IRQ_RETVAL(handled);
5557 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5559 struct tg3_napi *tnapi = dev_id;
5560 struct tg3 *tp = tnapi->tp;
5561 struct tg3_hw_status *sblk = tnapi->hw_status;
5562 unsigned int handled = 1;
5564 /* In INTx mode, it is possible for the interrupt to arrive at
5565  * the CPU before the status block posted prior to the interrupt
5566  * has landed in memory. Reading the PCI State register will confirm
5567  * whether the interrupt is ours and will flush the status block. */
5569 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5570 if (tg3_flag(tp, CHIP_RESETTING) ||
5571 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5572 handled = 0;
5573 goto out;
5578 /* Writing any value to intr-mbox-0 clears PCI INTA# and
5579  * chip-internal interrupt pending events.
5580  * Writing non-zero to intr-mbox-0 additionally tells the
5581  * NIC to stop sending us irqs, engaging "in-intr-handler"
5582  * event coalescing.
5584  * Flush the mailbox to de-assert the IRQ immediately to prevent
5585  * spurious interrupts. The flush impacts performance but
5586  * excessive spurious interrupts can be worse in some cases. */
5588 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5591 /* In a shared interrupt configuration, sometimes other devices'
5592  * interrupts will scream. We record the current status tag here
5593  * so that the above check can report that the screaming interrupts
5594  * are unhandled. Eventually they will be silenced. */
5596 tnapi->last_irq_tag = sblk->status_tag;
5598 if (tg3_irq_sync(tp))
5599 goto out;
5601 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5603 napi_schedule(&tnapi->napi);
5605 out:
5606 return IRQ_RETVAL(handled);
5609 /* ISR for interrupt test */
5610 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5612 struct tg3_napi *tnapi = dev_id;
5613 struct tg3 *tp = tnapi->tp;
5614 struct tg3_hw_status *sblk = tnapi->hw_status;
5616 if ((sblk->status & SD_STATUS_UPDATED) ||
5617 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5618 tg3_disable_ints(tp);
5619 return IRQ_RETVAL(1);
5621 return IRQ_RETVAL(0);
5624 static int tg3_init_hw(struct tg3 *, int);
5625 static int tg3_halt(struct tg3 *, int, int);
5627 /* Restart hardware after configuration changes, self-test, etc.
5628 * Invoked with tp->lock held.
5630 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5631 __releases(tp->lock)
5632 __acquires(tp->lock)
5634 int err;
5636 err = tg3_init_hw(tp, reset_phy);
5637 if (err) {
5638 netdev_err(tp->dev,
5639 "Failed to re-initialize device, aborting\n");
5640 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5641 tg3_full_unlock(tp);
5642 del_timer_sync(&tp->timer);
5643 tp->irq_sync = 0;
5644 tg3_napi_enable(tp);
5645 dev_close(tp->dev);
5646 tg3_full_lock(tp, 0);
5648 return err;
5651 #ifdef CONFIG_NET_POLL_CONTROLLER
5652 static void tg3_poll_controller(struct net_device *dev)
5654 int i;
5655 struct tg3 *tp = netdev_priv(dev);
5657 for (i = 0; i < tp->irq_cnt; i++)
5658 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5660 #endif
5662 static void tg3_reset_task(struct work_struct *work)
5664 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5665 int err;
5666 unsigned int restart_timer;
5668 tg3_full_lock(tp, 0);
5670 if (!netif_running(tp->dev)) {
5671 tg3_full_unlock(tp);
5672 return;
5675 tg3_full_unlock(tp);
5677 tg3_phy_stop(tp);
5679 tg3_netif_stop(tp);
5681 tg3_full_lock(tp, 1);
5683 restart_timer = tg3_flag(tp, RESTART_TIMER);
5684 tg3_flag_clear(tp, RESTART_TIMER);
5686 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5687 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5688 tp->write32_rx_mbox = tg3_write_flush_reg32;
5689 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5690 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5693 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5694 err = tg3_init_hw(tp, 1);
5695 if (err)
5696 goto out;
5698 tg3_netif_start(tp);
5700 if (restart_timer)
5701 mod_timer(&tp->timer, jiffies + 1);
5703 out:
5704 tg3_full_unlock(tp);
5706 if (!err)
5707 tg3_phy_start(tp);
5710 static void tg3_tx_timeout(struct net_device *dev)
5712 struct tg3 *tp = netdev_priv(dev);
5714 if (netif_msg_tx_err(tp)) {
5715 netdev_err(dev, "transmit timed out, resetting\n");
5716 tg3_dump_state(tp);
5719 schedule_work(&tp->reset_task);
5722 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5723 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5725 u32 base = (u32) mapping & 0xffffffff;
5727 return (base > 0xffffdcc0) && (base + len + 8 < base);
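/* The second clause catches 32-bit wrap-around: if base + len + 8
 * overflows a u32, the buffer (plus a small guard) straddles a 4GB
 * boundary.  The first clause is a cheap pre-filter: only buffers
 * starting within 0x2340 (9024) bytes of a boundary can wrap, 9024
 * presumably bounding the largest mapping this driver makes.
 */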
5730 /* Test for DMA addresses > 40-bit */
5731 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5732 int len)
5734 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5735 if (tg3_flag(tp, 40BIT_DMA_BUG))
5736 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5737 return 0;
5738 #else
5739 return 0;
5740 #endif
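/* In configurations where the #if above is false the test folds to 0,
 * on the assumption that DMA addresses there cannot exceed the 40 bits
 * the affected DMA engines can drive.
 */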
5743 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5744 dma_addr_t mapping, int len, u32 flags,
5745 u32 mss_and_is_end)
5747 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5748 int is_end = (mss_and_is_end & 0x1);
5749 u32 mss = (mss_and_is_end >> 1);
5750 u32 vlan_tag = 0;
5752 if (is_end)
5753 flags |= TXD_FLAG_END;
5754 if (flags & TXD_FLAG_VLAN) {
5755 vlan_tag = flags >> 16;
5756 flags &= 0xffff;
5758 vlan_tag |= (mss << TXD_MSS_SHIFT);
5760 txd->addr_hi = ((u64) mapping >> 32);
5761 txd->addr_lo = ((u64) mapping & 0xffffffff);
5762 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5763 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
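/* Note: mss_and_is_end packs the "last descriptor" flag into bit 0 and
 * the MSS into the remaining bits (hence the >> 1 above), and the
 * vlan_tag descriptor word carries both the VLAN tag and the MSS
 * shifted by TXD_MSS_SHIFT - two logical values per 32-bit field.
 */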
5766 static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
5767 struct sk_buff *skb, int last)
5769 int i;
5770 u32 entry = tnapi->tx_prod;
5771 struct ring_info *txb = &tnapi->tx_buffers[entry];
5773 pci_unmap_single(tnapi->tp->pdev,
5774 dma_unmap_addr(txb, mapping),
5775 skb_headlen(skb),
5776 PCI_DMA_TODEVICE);
5777 for (i = 0; i <= last; i++) {
5778 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5780 entry = NEXT_TX(entry);
5781 txb = &tnapi->tx_buffers[entry];
5783 pci_unmap_page(tnapi->tp->pdev,
5784 dma_unmap_addr(txb, mapping),
5785 frag->size, PCI_DMA_TODEVICE);
5789 /* Work around the 4GB and 40-bit hardware DMA bugs. */
5790 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5791 struct sk_buff *skb,
5792 u32 base_flags, u32 mss)
5794 struct tg3 *tp = tnapi->tp;
5795 struct sk_buff *new_skb;
5796 dma_addr_t new_addr = 0;
5797 u32 entry = tnapi->tx_prod;
5798 int ret = 0;
5800 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5801 new_skb = skb_copy(skb, GFP_ATOMIC);
5802 else {
5803 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5805 new_skb = skb_copy_expand(skb,
5806 skb_headroom(skb) + more_headroom,
5807 skb_tailroom(skb), GFP_ATOMIC);
5810 if (!new_skb) {
5811 ret = -1;
5812 } else {
5813 /* New SKB is guaranteed to be linear. */
5814 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5815 PCI_DMA_TODEVICE);
5816 /* Make sure the mapping succeeded */
5817 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5818 ret = -1;
5819 dev_kfree_skb(new_skb);
5821 /* Make sure new skb does not cross any 4G boundaries.
5822 * Drop the packet if it does.
5824 } else if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
5825 tg3_4g_overflow_test(new_addr, new_skb->len)) {
5826 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5827 PCI_DMA_TODEVICE);
5828 ret = -1;
5829 dev_kfree_skb(new_skb);
5830 } else {
5831 tnapi->tx_buffers[entry].skb = new_skb;
5832 dma_unmap_addr_set(&tnapi->tx_buffers[entry],
5833 mapping, new_addr);
5835 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5836 base_flags, 1 | (mss << 1));
5840 dev_kfree_skb(skb);
5842 return ret;
5845 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
5847 /* Use GSO to work around a rare TSO bug that may be triggered when the
5848  * TSO header is greater than 80 bytes. */
5850 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5852 struct sk_buff *segs, *nskb;
5853 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5855 /* Estimate the number of fragments in the worst case */
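/* (The factor of 3 is a conservative per-segment guess: each segment
 * emitted below goes through tg3_start_xmit() and typically needs a
 * descriptor for its linear header area plus one or two for page
 * fragments.  It is a heuristic bound, not an exact count.)
 */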
5856 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5857 netif_stop_queue(tp->dev);
5859 /* netif_tx_stop_queue() must be done before checking
5860  * the tx index in tg3_tx_avail() below, because in
5861 * tg3_tx(), we update tx index before checking for
5862 * netif_tx_queue_stopped().
5864 smp_mb();
5865 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5866 return NETDEV_TX_BUSY;
5868 netif_wake_queue(tp->dev);
5871 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5872 if (IS_ERR(segs))
5873 goto tg3_tso_bug_end;
5875 do {
5876 nskb = segs;
5877 segs = segs->next;
5878 nskb->next = NULL;
5879 tg3_start_xmit(nskb, tp->dev);
5880 } while (segs);
5882 tg3_tso_bug_end:
5883 dev_kfree_skb(skb);
5885 return NETDEV_TX_OK;
5888 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5889 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
5891 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
5893 struct tg3 *tp = netdev_priv(dev);
5894 u32 len, entry, base_flags, mss;
5895 int i = -1, would_hit_hwbug;
5896 dma_addr_t mapping;
5897 struct tg3_napi *tnapi;
5898 struct netdev_queue *txq;
5899 unsigned int last;
5901 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5902 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5903 if (tg3_flag(tp, ENABLE_TSS))
5904 tnapi++;
5906 /* We are running in BH disabled context with netif_tx_lock
5907 * and TX reclaim runs via tp->napi.poll inside of a software
5908 * interrupt. Furthermore, IRQ processing runs lockless so we have
5909 * no IRQ context deadlocks to worry about either. Rejoice!
5911 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5912 if (!netif_tx_queue_stopped(txq)) {
5913 netif_tx_stop_queue(txq);
5915 /* This is a hard error, log it. */
5916 netdev_err(dev,
5917 "BUG! Tx Ring full when queue awake!\n");
5919 return NETDEV_TX_BUSY;
5922 entry = tnapi->tx_prod;
5923 base_flags = 0;
5924 if (skb->ip_summed == CHECKSUM_PARTIAL)
5925 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5927 mss = skb_shinfo(skb)->gso_size;
5928 if (mss) {
5929 struct iphdr *iph;
5930 u32 tcp_opt_len, hdr_len;
5932 if (skb_header_cloned(skb) &&
5933 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5934 dev_kfree_skb(skb);
5935 goto out_unlock;
5938 iph = ip_hdr(skb);
5939 tcp_opt_len = tcp_optlen(skb);
5941 if (skb_is_gso_v6(skb)) {
5942 hdr_len = skb_headlen(skb) - ETH_HLEN;
5943 } else {
5944 u32 ip_tcp_len;
5946 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5947 hdr_len = ip_tcp_len + tcp_opt_len;
5949 iph->check = 0;
5950 iph->tot_len = htons(mss + hdr_len);
5953 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5954 tg3_flag(tp, TSO_BUG))
5955 return tg3_tso_bug(tp, skb);
5957 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5958 TXD_FLAG_CPU_POST_DMA);
5960 if (tg3_flag(tp, HW_TSO_1) ||
5961 tg3_flag(tp, HW_TSO_2) ||
5962 tg3_flag(tp, HW_TSO_3)) {
5963 tcp_hdr(skb)->check = 0;
5964 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5965 } else
5966 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5967 iph->daddr, 0,
5968 IPPROTO_TCP,
5971 if (tg3_flag(tp, HW_TSO_3)) {
5972 mss |= (hdr_len & 0xc) << 12;
5973 if (hdr_len & 0x10)
5974 base_flags |= 0x00000010;
5975 base_flags |= (hdr_len & 0x3e0) << 5;
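/* The TSO header length cannot be expressed in one field on HW_TSO_3
 * parts, so hdr_len's bits are scattered: bits 2-3 land in bits 14-15
 * of mss, bit 4 in bit 4 of base_flags, and bits 5-9 in bits 10-14 of
 * base_flags.  Bits 0-1 need no encoding because IP and TCP header
 * lengths are multiples of four bytes.
 */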
5976 } else if (tg3_flag(tp, HW_TSO_2))
5977 mss |= hdr_len << 9;
5978 else if (tg3_flag(tp, HW_TSO_1) ||
5979 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5980 if (tcp_opt_len || iph->ihl > 5) {
5981 int tsflags;
5983 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5984 mss |= (tsflags << 11);
5986 } else {
5987 if (tcp_opt_len || iph->ihl > 5) {
5988 int tsflags;
5990 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5991 base_flags |= tsflags << 12;
5996 if (vlan_tx_tag_present(skb))
5997 base_flags |= (TXD_FLAG_VLAN |
5998 (vlan_tx_tag_get(skb) << 16));
6000 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6001 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6002 base_flags |= TXD_FLAG_JMB_PKT;
6004 len = skb_headlen(skb);
6006 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6007 if (pci_dma_mapping_error(tp->pdev, mapping)) {
6008 dev_kfree_skb(skb);
6009 goto out_unlock;
6012 tnapi->tx_buffers[entry].skb = skb;
6013 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6015 would_hit_hwbug = 0;
6017 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6018 would_hit_hwbug = 1;
6020 if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6021 tg3_4g_overflow_test(mapping, len))
6022 would_hit_hwbug = 1;
6024 if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6025 tg3_40bit_overflow_test(tp, mapping, len))
6026 would_hit_hwbug = 1;
6028 if (tg3_flag(tp, 5701_DMA_BUG))
6029 would_hit_hwbug = 1;
6031 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
6032 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
6034 entry = NEXT_TX(entry);
6036 /* Now loop through additional data fragments, and queue them. */
6037 if (skb_shinfo(skb)->nr_frags > 0) {
6038 last = skb_shinfo(skb)->nr_frags - 1;
6039 for (i = 0; i <= last; i++) {
6040 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6042 len = frag->size;
6043 mapping = pci_map_page(tp->pdev,
6044 frag->page,
6045 frag->page_offset,
6046 len, PCI_DMA_TODEVICE);
6048 tnapi->tx_buffers[entry].skb = NULL;
6049 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6050 mapping);
6051 if (pci_dma_mapping_error(tp->pdev, mapping))
6052 goto dma_error;
6054 if (tg3_flag(tp, SHORT_DMA_BUG) &&
6055 len <= 8)
6056 would_hit_hwbug = 1;
6058 if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6059 tg3_4g_overflow_test(mapping, len))
6060 would_hit_hwbug = 1;
6062 if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6063 tg3_40bit_overflow_test(tp, mapping, len))
6064 would_hit_hwbug = 1;
6066 if (tg3_flag(tp, HW_TSO_1) ||
6067 tg3_flag(tp, HW_TSO_2) ||
6068 tg3_flag(tp, HW_TSO_3))
6069 tg3_set_txd(tnapi, entry, mapping, len,
6070 base_flags, (i == last)|(mss << 1));
6071 else
6072 tg3_set_txd(tnapi, entry, mapping, len,
6073 base_flags, (i == last));
6075 entry = NEXT_TX(entry);
6079 if (would_hit_hwbug) {
6080 tg3_skb_error_unmap(tnapi, skb, i);
6082 /* If the workaround fails due to memory/mapping
6083 * failure, silently drop this packet.
6085 if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags, mss))
6086 goto out_unlock;
6088 entry = NEXT_TX(tnapi->tx_prod);
6091 /* Packets are ready, update Tx producer idx, both locally and on the card. */
6092 tw32_tx_mbox(tnapi->prodmbox, entry);
6094 tnapi->tx_prod = entry;
6095 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6096 netif_tx_stop_queue(txq);
6098 /* netif_tx_stop_queue() must be done before checking
6099  * the tx index in tg3_tx_avail() below, because in
6100 * tg3_tx(), we update tx index before checking for
6101 * netif_tx_queue_stopped().
6103 smp_mb();
6104 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6105 netif_tx_wake_queue(txq);
6108 out_unlock:
6109 mmiowb();
6111 return NETDEV_TX_OK;
6113 dma_error:
6114 tg3_skb_error_unmap(tnapi, skb, i);
6115 dev_kfree_skb(skb);
6116 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6117 return NETDEV_TX_OK;
6120 static void tg3_set_loopback(struct net_device *dev, u32 features)
6122 struct tg3 *tp = netdev_priv(dev);
6124 if (features & NETIF_F_LOOPBACK) {
6125 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6126 return;
6129 * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6130 * loopback mode if Half-Duplex mode was negotiated earlier.
6132 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6134 /* Enable internal MAC loopback mode */
6135 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6136 spin_lock_bh(&tp->lock);
6137 tw32(MAC_MODE, tp->mac_mode);
6138 netif_carrier_on(tp->dev);
6139 spin_unlock_bh(&tp->lock);
6140 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6141 } else {
6142 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6143 return;
6145 /* Disable internal MAC loopback mode */
6146 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6147 spin_lock_bh(&tp->lock);
6148 tw32(MAC_MODE, tp->mac_mode);
6149 /* Force link status check */
6150 tg3_setup_phy(tp, 1);
6151 spin_unlock_bh(&tp->lock);
6152 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6156 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6158 struct tg3 *tp = netdev_priv(dev);
6160 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6161 features &= ~NETIF_F_ALL_TSO;
6163 return features;
6166 static int tg3_set_features(struct net_device *dev, u32 features)
6168 u32 changed = dev->features ^ features;
6170 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6171 tg3_set_loopback(dev, features);
6173 return 0;
6176 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6177 int new_mtu)
6179 dev->mtu = new_mtu;
6181 if (new_mtu > ETH_DATA_LEN) {
6182 if (tg3_flag(tp, 5780_CLASS)) {
6183 netdev_update_features(dev);
6184 tg3_flag_clear(tp, TSO_CAPABLE);
6185 } else {
6186 tg3_flag_set(tp, JUMBO_RING_ENABLE);
6188 } else {
6189 if (tg3_flag(tp, 5780_CLASS)) {
6190 tg3_flag_set(tp, TSO_CAPABLE);
6191 netdev_update_features(dev);
6193 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
6197 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6199 struct tg3 *tp = netdev_priv(dev);
6200 int err;
6202 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6203 return -EINVAL;
6205 if (!netif_running(dev)) {
6206 /* We'll just catch it later when the
6207 * device is brought up.
6209 tg3_set_mtu(dev, tp, new_mtu);
6210 return 0;
6213 tg3_phy_stop(tp);
6215 tg3_netif_stop(tp);
6217 tg3_full_lock(tp, 1);
6219 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6221 tg3_set_mtu(dev, tp, new_mtu);
6223 err = tg3_restart_hw(tp, 0);
6225 if (!err)
6226 tg3_netif_start(tp);
6228 tg3_full_unlock(tp);
6230 if (!err)
6231 tg3_phy_start(tp);
6233 return err;
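/* Release every skb still posted in a producer ring set. The per-vector
 * ring sets only hold buffers in their consumer-to-producer window; the
 * default set owns the full standard (and, if present, jumbo) rings.
 */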
6236 static void tg3_rx_prodring_free(struct tg3 *tp,
6237 struct tg3_rx_prodring_set *tpr)
6239 int i;
6241 if (tpr != &tp->napi[0].prodring) {
6242 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6243 i = (i + 1) & tp->rx_std_ring_mask)
6244 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6245 tp->rx_pkt_map_sz);
6247 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6248 for (i = tpr->rx_jmb_cons_idx;
6249 i != tpr->rx_jmb_prod_idx;
6250 i = (i + 1) & tp->rx_jmb_ring_mask) {
6251 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6252 TG3_RX_JMB_MAP_SZ);
6256 return;
6259 for (i = 0; i <= tp->rx_std_ring_mask; i++)
6260 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6261 tp->rx_pkt_map_sz);
6263 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6264 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6265 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6266 TG3_RX_JMB_MAP_SZ);
6270 /* Initialize rx rings for packet processing.
6272 * The chip has been shut down and the driver detached from
6273 * the networking stack, so no interrupts or new tx packets will
6274 * end up in the driver. tp->{tx,}lock are held and thus
6275 * we may not sleep.
6277 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6278 struct tg3_rx_prodring_set *tpr)
6280 u32 i, rx_pkt_dma_sz;
6282 tpr->rx_std_cons_idx = 0;
6283 tpr->rx_std_prod_idx = 0;
6284 tpr->rx_jmb_cons_idx = 0;
6285 tpr->rx_jmb_prod_idx = 0;
6287 if (tpr != &tp->napi[0].prodring) {
6288 memset(&tpr->rx_std_buffers[0], 0,
6289 TG3_RX_STD_BUFF_RING_SIZE(tp));
6290 if (tpr->rx_jmb_buffers)
6291 memset(&tpr->rx_jmb_buffers[0], 0,
6292 TG3_RX_JMB_BUFF_RING_SIZE(tp));
6293 goto done;
6296 /* Zero out all descriptors. */
6297 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6299 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6300 if (tg3_flag(tp, 5780_CLASS) &&
6301 tp->dev->mtu > ETH_DATA_LEN)
6302 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6303 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6305 /* Initialize invariants of the rings; we only set this
6306 * stuff once. This works because the card does not
6307 * write into the rx buffer posting rings.
6309 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6310 struct tg3_rx_buffer_desc *rxd;
6312 rxd = &tpr->rx_std[i];
6313 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6314 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6315 rxd->opaque = (RXD_OPAQUE_RING_STD |
6316 (i << RXD_OPAQUE_INDEX_SHIFT));
6319 /* Now allocate fresh SKBs for each rx ring. */
6320 for (i = 0; i < tp->rx_pending; i++) {
6321 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6322 netdev_warn(tp->dev,
6323 "Using a smaller RX standard ring. Only "
6324 "%d out of %d buffers were allocated "
6325 "successfully\n", i, tp->rx_pending);
6326 if (i == 0)
6327 goto initfail;
6328 tp->rx_pending = i;
6329 break;
6333 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6334 goto done;
6336 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6338 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6339 goto done;
6341 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6342 struct tg3_rx_buffer_desc *rxd;
6344 rxd = &tpr->rx_jmb[i].std;
6345 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6346 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6347 RXD_FLAG_JUMBO;
6348 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6349 (i << RXD_OPAQUE_INDEX_SHIFT));
6352 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6353 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6354 netdev_warn(tp->dev,
6355 "Using a smaller RX jumbo ring. Only %d "
6356 "out of %d buffers were allocated "
6357 "successfully\n", i, tp->rx_jumbo_pending);
6358 if (i == 0)
6359 goto initfail;
6360 tp->rx_jumbo_pending = i;
6361 break;
6365 done:
6366 return 0;
6368 initfail:
6369 tg3_rx_prodring_free(tp, tpr);
6370 return -ENOMEM;
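/* Tear down a producer ring set: free the host-side buffer bookkeeping
 * arrays and hand the DMA-coherent descriptor rings, if any, back to
 * the system.
 */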
6373 static void tg3_rx_prodring_fini(struct tg3 *tp,
6374 struct tg3_rx_prodring_set *tpr)
6376 kfree(tpr->rx_std_buffers);
6377 tpr->rx_std_buffers = NULL;
6378 kfree(tpr->rx_jmb_buffers);
6379 tpr->rx_jmb_buffers = NULL;
6380 if (tpr->rx_std) {
6381 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6382 tpr->rx_std, tpr->rx_std_mapping);
6383 tpr->rx_std = NULL;
6385 if (tpr->rx_jmb) {
6386 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6387 tpr->rx_jmb, tpr->rx_jmb_mapping);
6388 tpr->rx_jmb = NULL;
6392 static int tg3_rx_prodring_init(struct tg3 *tp,
6393 struct tg3_rx_prodring_set *tpr)
6395 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6396 GFP_KERNEL);
6397 if (!tpr->rx_std_buffers)
6398 return -ENOMEM;
6400 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6401 TG3_RX_STD_RING_BYTES(tp),
6402 &tpr->rx_std_mapping,
6403 GFP_KERNEL);
6404 if (!tpr->rx_std)
6405 goto err_out;
6407 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6408 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6409 GFP_KERNEL);
6410 if (!tpr->rx_jmb_buffers)
6411 goto err_out;
6413 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6414 TG3_RX_JMB_RING_BYTES(tp),
6415 &tpr->rx_jmb_mapping,
6416 GFP_KERNEL);
6417 if (!tpr->rx_jmb)
6418 goto err_out;
6421 return 0;
6423 err_out:
6424 tg3_rx_prodring_fini(tp, tpr);
6425 return -ENOMEM;
6428 /* Free up pending packets in all rx/tx rings.
6430 * The chip has been shut down and the driver detached from
6431 * the networking stack, so no interrupts or new tx packets will
6432 * end up in the driver. tp->{tx,}lock is not held and we are not
6433 * in an interrupt context and thus may sleep.
6435 static void tg3_free_rings(struct tg3 *tp)
6437 int i, j;
6439 for (j = 0; j < tp->irq_cnt; j++) {
6440 struct tg3_napi *tnapi = &tp->napi[j];
6442 tg3_rx_prodring_free(tp, &tnapi->prodring);
6444 if (!tnapi->tx_buffers)
6445 continue;
6447 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6448 struct ring_info *txp;
6449 struct sk_buff *skb;
6450 unsigned int k;
6452 txp = &tnapi->tx_buffers[i];
6453 skb = txp->skb;
6455 if (skb == NULL) {
6456 i++;
6457 continue;
6460 pci_unmap_single(tp->pdev,
6461 dma_unmap_addr(txp, mapping),
6462 skb_headlen(skb),
6463 PCI_DMA_TODEVICE);
6464 txp->skb = NULL;
6466 i++;
6468 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6469 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6470 pci_unmap_page(tp->pdev,
6471 dma_unmap_addr(txp, mapping),
6472 skb_shinfo(skb)->frags[k].size,
6473 PCI_DMA_TODEVICE);
6474 i++;
6477 dev_kfree_skb_any(skb);
6482 /* Initialize tx/rx rings for packet processing.
6484 * The chip has been shut down and the driver detached from
6485 * the networking stack, so no interrupts or new tx packets will
6486 * end up in the driver. tp->{tx,}lock are held and thus
6487 * we may not sleep.
6489 static int tg3_init_rings(struct tg3 *tp)
6491 int i;
6493 /* Free up all the SKBs. */
6494 tg3_free_rings(tp);
6496 for (i = 0; i < tp->irq_cnt; i++) {
6497 struct tg3_napi *tnapi = &tp->napi[i];
6499 tnapi->last_tag = 0;
6500 tnapi->last_irq_tag = 0;
6501 tnapi->hw_status->status = 0;
6502 tnapi->hw_status->status_tag = 0;
6503 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6505 tnapi->tx_prod = 0;
6506 tnapi->tx_cons = 0;
6507 if (tnapi->tx_ring)
6508 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6510 tnapi->rx_rcb_ptr = 0;
6511 if (tnapi->rx_rcb)
6512 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6514 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6515 tg3_free_rings(tp);
6516 return -ENOMEM;
6520 return 0;
6524 * Must not be invoked with interrupt sources disabled and
6525 * the hardware shut down.
6527 static void tg3_free_consistent(struct tg3 *tp)
6529 int i;
6531 for (i = 0; i < tp->irq_cnt; i++) {
6532 struct tg3_napi *tnapi = &tp->napi[i];
6534 if (tnapi->tx_ring) {
6535 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6536 tnapi->tx_ring, tnapi->tx_desc_mapping);
6537 tnapi->tx_ring = NULL;
6540 kfree(tnapi->tx_buffers);
6541 tnapi->tx_buffers = NULL;
6543 if (tnapi->rx_rcb) {
6544 dma_free_coherent(&tp->pdev->dev,
6545 TG3_RX_RCB_RING_BYTES(tp),
6546 tnapi->rx_rcb,
6547 tnapi->rx_rcb_mapping);
6548 tnapi->rx_rcb = NULL;
6551 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6553 if (tnapi->hw_status) {
6554 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6555 tnapi->hw_status,
6556 tnapi->status_mapping);
6557 tnapi->hw_status = NULL;
6561 if (tp->hw_stats) {
6562 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6563 tp->hw_stats, tp->stats_mapping);
6564 tp->hw_stats = NULL;
6569 * Must not be invoked with interrupt sources disabled and
6570 * the hardware shut down. Can sleep.
6572 static int tg3_alloc_consistent(struct tg3 *tp)
6574 int i;
6576 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6577 sizeof(struct tg3_hw_stats),
6578 &tp->stats_mapping,
6579 GFP_KERNEL);
6580 if (!tp->hw_stats)
6581 goto err_out;
6583 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6585 for (i = 0; i < tp->irq_cnt; i++) {
6586 struct tg3_napi *tnapi = &tp->napi[i];
6587 struct tg3_hw_status *sblk;
6589 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6590 TG3_HW_STATUS_SIZE,
6591 &tnapi->status_mapping,
6592 GFP_KERNEL);
6593 if (!tnapi->hw_status)
6594 goto err_out;
6596 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6597 sblk = tnapi->hw_status;
6599 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6600 goto err_out;
6602 /* If multivector TSS is enabled, vector 0 does not handle
6603 * tx interrupts. Don't allocate any resources for it.
6605 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6606 (i && tg3_flag(tp, ENABLE_TSS))) {
6607 tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6608 TG3_TX_RING_SIZE,
6609 GFP_KERNEL);
6610 if (!tnapi->tx_buffers)
6611 goto err_out;
6613 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6614 TG3_TX_RING_BYTES,
6615 &tnapi->tx_desc_mapping,
6616 GFP_KERNEL);
6617 if (!tnapi->tx_ring)
6618 goto err_out;
6622 * When RSS is enabled, the status block format changes
6623 * slightly. The "rx_jumbo_consumer", "reserved",
6624 * and "rx_mini_consumer" members get mapped to the
6625 * other three rx return ring producer indexes.
6627 switch (i) {
6628 default:
6629 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6630 break;
6631 case 2:
6632 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6633 break;
6634 case 3:
6635 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6636 break;
6637 case 4:
6638 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6639 break;
6643 * If multivector RSS is enabled, vector 0 does not handle
6644 * rx or tx interrupts. Don't allocate any resources for it.
6646 if (!i && tg3_flag(tp, ENABLE_RSS))
6647 continue;
6649 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6650 TG3_RX_RCB_RING_BYTES(tp),
6651 &tnapi->rx_rcb_mapping,
6652 GFP_KERNEL);
6653 if (!tnapi->rx_rcb)
6654 goto err_out;
6656 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6659 return 0;
6661 err_out:
6662 tg3_free_consistent(tp);
6663 return -ENOMEM;
6666 #define MAX_WAIT_CNT 1000
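/* The stop/abort loops below poll in 100 usec steps, so MAX_WAIT_CNT
 * bounds each wait at roughly 1000 * 100 usec = 100 msec.
 */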
6668 /* To stop a block, clear the enable bit and poll till it
6669 * clears. tp->lock is held.
6671 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6673 unsigned int i;
6674 u32 val;
6676 if (tg3_flag(tp, 5705_PLUS)) {
6677 switch (ofs) {
6678 case RCVLSC_MODE:
6679 case DMAC_MODE:
6680 case MBFREE_MODE:
6681 case BUFMGR_MODE:
6682 case MEMARB_MODE:
6683 /* We can't enable/disable these bits of the
6684 * 5705/5750, so just say success.
6686 return 0;
6688 default:
6689 break;
6693 val = tr32(ofs);
6694 val &= ~enable_bit;
6695 tw32_f(ofs, val);
6697 for (i = 0; i < MAX_WAIT_CNT; i++) {
6698 udelay(100);
6699 val = tr32(ofs);
6700 if ((val & enable_bit) == 0)
6701 break;
6704 if (i == MAX_WAIT_CNT && !silent) {
6705 dev_err(&tp->pdev->dev,
6706 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6707 ofs, enable_bit);
6708 return -ENODEV;
6711 return 0;
6714 /* tp->lock is held. */
6715 static int tg3_abort_hw(struct tg3 *tp, int silent)
6717 int i, err;
6719 tg3_disable_ints(tp);
6721 tp->rx_mode &= ~RX_MODE_ENABLE;
6722 tw32_f(MAC_RX_MODE, tp->rx_mode);
6723 udelay(10);
6725 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6726 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6727 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6728 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6729 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6730 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6732 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6733 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6734 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6735 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6736 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6737 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6738 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6740 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6741 tw32_f(MAC_MODE, tp->mac_mode);
6742 udelay(40);
6744 tp->tx_mode &= ~TX_MODE_ENABLE;
6745 tw32_f(MAC_TX_MODE, tp->tx_mode);
6747 for (i = 0; i < MAX_WAIT_CNT; i++) {
6748 udelay(100);
6749 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6750 break;
6752 if (i >= MAX_WAIT_CNT) {
6753 dev_err(&tp->pdev->dev,
6754 "%s timed out, TX_MODE_ENABLE will not clear "
6755 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6756 err |= -ENODEV;
6759 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6760 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6761 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6763 tw32(FTQ_RESET, 0xffffffff);
6764 tw32(FTQ_RESET, 0x00000000);
6766 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6767 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6769 for (i = 0; i < tp->irq_cnt; i++) {
6770 struct tg3_napi *tnapi = &tp->napi[i];
6771 if (tnapi->hw_status)
6772 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6774 if (tp->hw_stats)
6775 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6777 return err;
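/* Post an event to the APE management firmware: once any previous event
 * has been consumed, the event word is written to TG3_APE_EVENT_STATUS
 * under the APE memory lock and the APE is kicked via TG3_APE_EVENT.
 * NCSI firmware does not accept APE events at all.
 */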
6780 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6782 int i;
6783 u32 apedata;
6785 /* NCSI does not support APE events */
6786 if (tg3_flag(tp, APE_HAS_NCSI))
6787 return;
6789 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6790 if (apedata != APE_SEG_SIG_MAGIC)
6791 return;
6793 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6794 if (!(apedata & APE_FW_STATUS_READY))
6795 return;
6797 /* Wait for up to 1 millisecond for APE to service previous event. */
6798 for (i = 0; i < 10; i++) {
6799 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6800 return;
6802 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6804 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6805 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6806 event | APE_EVENT_STATUS_EVENT_PENDING);
6808 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6810 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6811 break;
6813 udelay(100);
6816 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6817 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6820 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6822 u32 event;
6823 u32 apedata;
6825 if (!tg3_flag(tp, ENABLE_APE))
6826 return;
6828 switch (kind) {
6829 case RESET_KIND_INIT:
6830 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6831 APE_HOST_SEG_SIG_MAGIC);
6832 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6833 APE_HOST_SEG_LEN_MAGIC);
6834 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6835 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6836 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6837 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6838 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6839 APE_HOST_BEHAV_NO_PHYLOCK);
6840 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6841 TG3_APE_HOST_DRVR_STATE_START);
6843 event = APE_EVENT_STATUS_STATE_START;
6844 break;
6845 case RESET_KIND_SHUTDOWN:
6846 /* With the interface we are currently using,
6847 * APE does not track driver state. Wiping
6848 * out the HOST SEGMENT SIGNATURE forces
6849 * the APE to assume OS absent status.
6851 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6853 if (device_may_wakeup(&tp->pdev->dev) &&
6854 tg3_flag(tp, WOL_ENABLE)) {
6855 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
6856 TG3_APE_HOST_WOL_SPEED_AUTO);
6857 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
6858 } else
6859 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
6861 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
6863 event = APE_EVENT_STATUS_STATE_UNLOAD;
6864 break;
6865 case RESET_KIND_SUSPEND:
6866 event = APE_EVENT_STATUS_STATE_SUSPEND;
6867 break;
6868 default:
6869 return;
6872 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6874 tg3_ape_send_event(tp, event);
6877 /* tp->lock is held. */
6878 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6880 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6881 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6883 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6884 switch (kind) {
6885 case RESET_KIND_INIT:
6886 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6887 DRV_STATE_START);
6888 break;
6890 case RESET_KIND_SHUTDOWN:
6891 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6892 DRV_STATE_UNLOAD);
6893 break;
6895 case RESET_KIND_SUSPEND:
6896 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6897 DRV_STATE_SUSPEND);
6898 break;
6900 default:
6901 break;
6905 if (kind == RESET_KIND_INIT ||
6906 kind == RESET_KIND_SUSPEND)
6907 tg3_ape_driver_state_change(tp, kind);
6910 /* tp->lock is held. */
6911 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6913 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6914 switch (kind) {
6915 case RESET_KIND_INIT:
6916 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6917 DRV_STATE_START_DONE);
6918 break;
6920 case RESET_KIND_SHUTDOWN:
6921 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6922 DRV_STATE_UNLOAD_DONE);
6923 break;
6925 default:
6926 break;
6930 if (kind == RESET_KIND_SHUTDOWN)
6931 tg3_ape_driver_state_change(tp, kind);
6934 /* tp->lock is held. */
6935 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6937 if (tg3_flag(tp, ENABLE_ASF)) {
6938 switch (kind) {
6939 case RESET_KIND_INIT:
6940 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6941 DRV_STATE_START);
6942 break;
6944 case RESET_KIND_SHUTDOWN:
6945 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6946 DRV_STATE_UNLOAD);
6947 break;
6949 case RESET_KIND_SUSPEND:
6950 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6951 DRV_STATE_SUSPEND);
6952 break;
6954 default:
6955 break;
6960 static int tg3_poll_fw(struct tg3 *tp)
6962 int i;
6963 u32 val;
6965 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6966 /* Wait up to 20ms for init done. */
6967 for (i = 0; i < 200; i++) {
6968 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6969 return 0;
6970 udelay(100);
6972 return -ENODEV;
6975 /* Wait for firmware initialization to complete. */
6976 for (i = 0; i < 100000; i++) {
6977 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6978 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6979 break;
6980 udelay(10);
6983 /* Chip might not be fitted with firmware. Some Sun onboard
6984 * parts are configured like that. So don't signal the timeout
6985 * of the above loop as an error, but do report the lack of
6986 * running firmware once.
6988 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
6989 tg3_flag_set(tp, NO_FWARE_REPORTED);
6991 netdev_info(tp->dev, "No firmware running\n");
6994 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
6995 /* The 57765 A0 needs a little more
6996 * time to do some important work.
6998 mdelay(10);
7001 return 0;
7004 /* Save PCI command register before chip reset */
7005 static void tg3_save_pci_state(struct tg3 *tp)
7007 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7010 /* Restore PCI state after chip reset */
7011 static void tg3_restore_pci_state(struct tg3 *tp)
7013 u32 val;
7015 /* Re-enable indirect register accesses. */
7016 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7017 tp->misc_host_ctrl);
7019 /* Set MAX PCI retry to zero. */
7020 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7021 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7022 tg3_flag(tp, PCIX_MODE))
7023 val |= PCISTATE_RETRY_SAME_DMA;
7024 /* Allow reads and writes to the APE register and memory space. */
7025 if (tg3_flag(tp, ENABLE_APE))
7026 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7027 PCISTATE_ALLOW_APE_SHMEM_WR |
7028 PCISTATE_ALLOW_APE_PSPACE_WR;
7029 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7031 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7033 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7034 if (tg3_flag(tp, PCI_EXPRESS))
7035 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7036 else {
7037 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7038 tp->pci_cacheline_sz);
7039 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7040 tp->pci_lat_timer);
7044 /* Make sure PCI-X relaxed ordering bit is clear. */
7045 if (tg3_flag(tp, PCIX_MODE)) {
7046 u16 pcix_cmd;
7048 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7049 &pcix_cmd);
7050 pcix_cmd &= ~PCI_X_CMD_ERO;
7051 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7052 pcix_cmd);
7055 if (tg3_flag(tp, 5780_CLASS)) {
7057 /* Chip reset on 5780 will reset MSI enable bit,
7058 * so we need to restore it.
7060 if (tg3_flag(tp, USING_MSI)) {
7061 u16 ctrl;
7063 pci_read_config_word(tp->pdev,
7064 tp->msi_cap + PCI_MSI_FLAGS,
7065 &ctrl);
7066 pci_write_config_word(tp->pdev,
7067 tp->msi_cap + PCI_MSI_FLAGS,
7068 ctrl | PCI_MSI_FLAGS_ENABLE);
7069 val = tr32(MSGINT_MODE);
7070 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
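/* Forward declaration: tg3_chip_reset() below needs to pause the
 * firmware, but tg3_stop_fw() is defined further down in the file.
 */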
7075 static void tg3_stop_fw(struct tg3 *);
7077 /* tp->lock is held. */
7078 static int tg3_chip_reset(struct tg3 *tp)
7080 u32 val;
7081 void (*write_op)(struct tg3 *, u32, u32);
7082 int i, err;
7084 tg3_nvram_lock(tp);
7086 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7088 /* No matching tg3_nvram_unlock() after this because
7089 * chip reset below will undo the nvram lock.
7091 tp->nvram_lock_cnt = 0;
7093 /* GRC_MISC_CFG core clock reset will clear the memory
7094 * enable bit in PCI register 4 and the MSI enable bit
7095 * on some chips, so we save relevant registers here.
7097 tg3_save_pci_state(tp);
7099 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7100 tg3_flag(tp, 5755_PLUS))
7101 tw32(GRC_FASTBOOT_PC, 0);
7104 * We must avoid the readl() that normally takes place.
7105 * It locks machines, causes machine checks, and other
7106 * fun things. So, temporarily disable the 5701
7107 * hardware workaround, while we do the reset.
7109 write_op = tp->write32;
7110 if (write_op == tg3_write_flush_reg32)
7111 tp->write32 = tg3_write32;
7113 /* Prevent the irq handler from reading or writing PCI registers
7114 * during chip reset when the memory enable bit in the PCI command
7115 * register may be cleared. The chip does not generate interrupt
7116 * at this time, but the irq handler may still be called due to irq
7117 * sharing or irqpoll.
7119 tg3_flag_set(tp, CHIP_RESETTING);
7120 for (i = 0; i < tp->irq_cnt; i++) {
7121 struct tg3_napi *tnapi = &tp->napi[i];
7122 if (tnapi->hw_status) {
7123 tnapi->hw_status->status = 0;
7124 tnapi->hw_status->status_tag = 0;
7126 tnapi->last_tag = 0;
7127 tnapi->last_irq_tag = 0;
7129 smp_mb();
7131 for (i = 0; i < tp->irq_cnt; i++)
7132 synchronize_irq(tp->napi[i].irq_vec);
7134 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7135 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7136 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7139 /* do the reset */
7140 val = GRC_MISC_CFG_CORECLK_RESET;
7142 if (tg3_flag(tp, PCI_EXPRESS)) {
7143 /* Force PCIe 1.0a mode */
7144 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7145 !tg3_flag(tp, 57765_PLUS) &&
7146 tr32(TG3_PCIE_PHY_TSTCTL) ==
7147 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7148 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7150 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7151 tw32(GRC_MISC_CFG, (1 << 29));
7152 val |= (1 << 29);
7156 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7157 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7158 tw32(GRC_VCPU_EXT_CTRL,
7159 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7162 /* Manage gphy power for all CPMU absent PCIe devices. */
7163 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7164 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7166 tw32(GRC_MISC_CFG, val);
7168 /* restore 5701 hardware bug workaround write method */
7169 tp->write32 = write_op;
7171 /* Unfortunately, we have to delay before the PCI read back.
7172 * Some 575X chips will not even respond to a PCI cfg access
7173 * when the reset command is given to the chip.
7175 * How do these hardware designers expect things to work
7176 * properly if the PCI write is posted for a long period
7177 * of time? It is always necessary to have some method by
7178 * which a register read back can occur to push the write
7179 * out which does the reset.
7181 * For most tg3 variants the trick below was working.
7182 * Ho hum...
7184 udelay(120);
7186 /* Flush PCI posted writes. The normal MMIO registers
7187 * are inaccessible at this time so this is the only
7188 * way to make this work reliably (actually, this is no longer
7189 * the case, see above). I tried to use indirect
7190 * register read/write but this upset some 5701 variants.
7192 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7194 udelay(120);
7196 if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) {
7197 u16 val16;
7199 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7200 int i;
7201 u32 cfg_val;
7203 /* Wait for link training to complete. */
7204 for (i = 0; i < 5000; i++)
7205 udelay(100);
7207 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7208 pci_write_config_dword(tp->pdev, 0xc4,
7209 cfg_val | (1 << 15));
7212 /* Clear the "no snoop" and "relaxed ordering" bits. */
7213 pci_read_config_word(tp->pdev,
7214 tp->pcie_cap + PCI_EXP_DEVCTL,
7215 &val16);
7216 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7217 PCI_EXP_DEVCTL_NOSNOOP_EN);
7219 * Older PCIe devices only support the 128 byte
7220 * MPS setting. Enforce the restriction.
7222 if (!tg3_flag(tp, CPMU_PRESENT))
7223 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7224 pci_write_config_word(tp->pdev,
7225 tp->pcie_cap + PCI_EXP_DEVCTL,
7226 val16);
7228 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7230 /* Clear error status */
7231 pci_write_config_word(tp->pdev,
7232 tp->pcie_cap + PCI_EXP_DEVSTA,
7233 PCI_EXP_DEVSTA_CED |
7234 PCI_EXP_DEVSTA_NFED |
7235 PCI_EXP_DEVSTA_FED |
7236 PCI_EXP_DEVSTA_URD);
7239 tg3_restore_pci_state(tp);
7241 tg3_flag_clear(tp, CHIP_RESETTING);
7242 tg3_flag_clear(tp, ERROR_PROCESSED);
7244 val = 0;
7245 if (tg3_flag(tp, 5780_CLASS))
7246 val = tr32(MEMARB_MODE);
7247 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7249 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7250 tg3_stop_fw(tp);
7251 tw32(0x5000, 0x400);
7254 tw32(GRC_MODE, tp->grc_mode);
7256 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7257 val = tr32(0xc4);
7259 tw32(0xc4, val | (1 << 15));
7262 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7263 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7264 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7265 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7266 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7267 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7270 if (tg3_flag(tp, ENABLE_APE))
7271 tp->mac_mode = MAC_MODE_APE_TX_EN |
7272 MAC_MODE_APE_RX_EN |
7273 MAC_MODE_TDE_ENABLE;
7275 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7276 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
7277 val = tp->mac_mode;
7278 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7279 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7280 val = tp->mac_mode;
7281 } else
7282 val = 0;
7284 tw32_f(MAC_MODE, val);
7285 udelay(40);
7287 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7289 err = tg3_poll_fw(tp);
7290 if (err)
7291 return err;
7293 tg3_mdio_start(tp);
7295 if (tg3_flag(tp, PCI_EXPRESS) &&
7296 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7297 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7298 !tg3_flag(tp, 57765_PLUS)) {
7299 val = tr32(0x7c00);
7301 tw32(0x7c00, val | (1 << 25));
7304 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7305 val = tr32(TG3_CPMU_CLCK_ORIDE);
7306 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7309 /* Reprobe ASF enable state. */
7310 tg3_flag_clear(tp, ENABLE_ASF);
7311 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7312 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7313 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7314 u32 nic_cfg;
7316 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7317 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7318 tg3_flag_set(tp, ENABLE_ASF);
7319 tp->last_event_jiffies = jiffies;
7320 if (tg3_flag(tp, 5750_PLUS))
7321 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7325 return 0;
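/* Pause the ASF management firmware: wait for the RX cpu to ack any
 * previous event, post FWCMD_NICDRV_PAUSE_FW through the firmware
 * command mailbox, then wait for this event to be acked as well. This
 * only applies when ASF runs without an APE.
 */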
7328 /* tp->lock is held. */
7329 static void tg3_stop_fw(struct tg3 *tp)
7331 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7332 /* Wait for RX cpu to ACK the previous event. */
7333 tg3_wait_for_event_ack(tp);
7335 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7337 tg3_generate_fw_event(tp);
7339 /* Wait for RX cpu to ACK this event. */
7340 tg3_wait_for_event_ack(tp);
7344 /* tp->lock is held. */
7345 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7347 int err;
7349 tg3_stop_fw(tp);
7351 tg3_write_sig_pre_reset(tp, kind);
7353 tg3_abort_hw(tp, silent);
7354 err = tg3_chip_reset(tp);
7356 __tg3_set_mac_addr(tp, 0);
7358 tg3_write_sig_legacy(tp, kind);
7359 tg3_write_sig_post_reset(tp, kind);
7361 if (err)
7362 return err;
7364 return 0;
7367 #define RX_CPU_SCRATCH_BASE 0x30000
7368 #define RX_CPU_SCRATCH_SIZE 0x04000
7369 #define TX_CPU_SCRATCH_BASE 0x34000
7370 #define TX_CPU_SCRATCH_SIZE 0x04000
7372 /* tp->lock is held. */
7373 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7375 int i;
7377 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7379 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7380 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7382 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7383 return 0;
7385 if (offset == RX_CPU_BASE) {
7386 for (i = 0; i < 10000; i++) {
7387 tw32(offset + CPU_STATE, 0xffffffff);
7388 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7389 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7390 break;
7393 tw32(offset + CPU_STATE, 0xffffffff);
7394 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
7395 udelay(10);
7396 } else {
7397 for (i = 0; i < 10000; i++) {
7398 tw32(offset + CPU_STATE, 0xffffffff);
7399 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7400 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7401 break;
7405 if (i >= 10000) {
7406 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7407 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7408 return -ENODEV;
7411 /* Clear firmware's nvram arbitration. */
7412 if (tg3_flag(tp, NVRAM))
7413 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7414 return 0;
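/* One firmware blob destined for a CPU scratch area: fw_base is the
 * load address taken from the blob header, fw_len the payload length in
 * bytes, and fw_data the payload as big-endian words (see the header
 * parsing in tg3_load_5701_a0_firmware_fix() below).
 */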
7417 struct fw_info {
7418 unsigned int fw_base;
7419 unsigned int fw_len;
7420 const __be32 *fw_data;
7423 /* tp->lock is held. */
7424 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7425 int cpu_scratch_size, struct fw_info *info)
7427 int err, lock_err, i;
7428 void (*write_op)(struct tg3 *, u32, u32);
7430 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7431 netdev_err(tp->dev,
7432 "%s: Trying to load TX cpu firmware which is 5705\n",
7433 __func__);
7434 return -EINVAL;
7437 if (tg3_flag(tp, 5705_PLUS))
7438 write_op = tg3_write_mem;
7439 else
7440 write_op = tg3_write_indirect_reg32;
7442 /* It is possible that bootcode is still loading at this point.
7443 * Get the nvram lock first before halting the cpu.
7445 lock_err = tg3_nvram_lock(tp);
7446 err = tg3_halt_cpu(tp, cpu_base);
7447 if (!lock_err)
7448 tg3_nvram_unlock(tp);
7449 if (err)
7450 goto out;
7452 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7453 write_op(tp, cpu_scratch_base + i, 0);
7454 tw32(cpu_base + CPU_STATE, 0xffffffff);
7455 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7456 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7457 write_op(tp, (cpu_scratch_base +
7458 (info->fw_base & 0xffff) +
7459 (i * sizeof(u32))),
7460 be32_to_cpu(info->fw_data[i]));
7462 err = 0;
7464 out:
7465 return err;
7468 /* tp->lock is held. */
7469 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7471 struct fw_info info;
7472 const __be32 *fw_data;
7473 int err, i;
7475 fw_data = (void *)tp->fw->data;
7477 /* Firmware blob starts with version numbers, followed by
7478 start address and length. We are setting complete length.
7479 length = end_address_of_bss - start_address_of_text.
7480 Remainder is the blob to be loaded contiguously
7481 from start address. */
7483 info.fw_base = be32_to_cpu(fw_data[1]);
7484 info.fw_len = tp->fw->size - 12;
7485 info.fw_data = &fw_data[3];
7487 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7488 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7489 &info);
7490 if (err)
7491 return err;
7493 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7494 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7495 &info);
7496 if (err)
7497 return err;
7499 /* Now startup only the RX cpu. */
7500 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7501 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7503 for (i = 0; i < 5; i++) {
7504 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7505 break;
7506 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7507 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
7508 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7509 udelay(1000);
7511 if (i >= 5) {
7512 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7513 "should be %08x\n", __func__,
7514 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7515 return -ENODEV;
7517 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7518 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
7520 return 0;
7523 /* tp->lock is held. */
7524 static int tg3_load_tso_firmware(struct tg3 *tp)
7526 struct fw_info info;
7527 const __be32 *fw_data;
7528 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7529 int err, i;
7531 if (tg3_flag(tp, HW_TSO_1) ||
7532 tg3_flag(tp, HW_TSO_2) ||
7533 tg3_flag(tp, HW_TSO_3))
7534 return 0;
7536 fw_data = (void *)tp->fw->data;
7538 /* Firmware blob starts with version numbers, followed by
7539 start address and length. We are setting complete length.
7540 length = end_address_of_bss - start_address_of_text.
7541 Remainder is the blob to be loaded contiguously
7542 from start address. */
7544 info.fw_base = be32_to_cpu(fw_data[1]);
7545 cpu_scratch_size = tp->fw_len;
7546 info.fw_len = tp->fw->size - 12;
7547 info.fw_data = &fw_data[3];
7549 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7550 cpu_base = RX_CPU_BASE;
7551 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7552 } else {
7553 cpu_base = TX_CPU_BASE;
7554 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7555 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7558 err = tg3_load_firmware_cpu(tp, cpu_base,
7559 cpu_scratch_base, cpu_scratch_size,
7560 &info);
7561 if (err)
7562 return err;
7564 /* Now startup the cpu. */
7565 tw32(cpu_base + CPU_STATE, 0xffffffff);
7566 tw32_f(cpu_base + CPU_PC, info.fw_base);
7568 for (i = 0; i < 5; i++) {
7569 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7570 break;
7571 tw32(cpu_base + CPU_STATE, 0xffffffff);
7572 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7573 tw32_f(cpu_base + CPU_PC, info.fw_base);
7574 udelay(1000);
7576 if (i >= 5) {
7577 netdev_err(tp->dev,
7578 "%s fails to set CPU PC, is %08x should be %08x\n",
7579 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7580 return -ENODEV;
7582 tw32(cpu_base + CPU_STATE, 0xffffffff);
7583 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7584 return 0;
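/* ndo_set_mac_address handler: validate and record the new address, and
 * reprogram the MAC address registers if the interface is running. MAC
 * address slot 1 is skipped when ASF firmware appears to be using it.
 */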
7588 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7590 struct tg3 *tp = netdev_priv(dev);
7591 struct sockaddr *addr = p;
7592 int err = 0, skip_mac_1 = 0;
7594 if (!is_valid_ether_addr(addr->sa_data))
7595 return -EINVAL;
7597 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7599 if (!netif_running(dev))
7600 return 0;
7602 if (tg3_flag(tp, ENABLE_ASF)) {
7603 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7605 addr0_high = tr32(MAC_ADDR_0_HIGH);
7606 addr0_low = tr32(MAC_ADDR_0_LOW);
7607 addr1_high = tr32(MAC_ADDR_1_HIGH);
7608 addr1_low = tr32(MAC_ADDR_1_LOW);
7610 /* Skip MAC addr 1 if ASF is using it. */
7611 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7612 !(addr1_high == 0 && addr1_low == 0))
7613 skip_mac_1 = 1;
7615 spin_lock_bh(&tp->lock);
7616 __tg3_set_mac_addr(tp, skip_mac_1);
7617 spin_unlock_bh(&tp->lock);
7619 return err;
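/* Write one TG3_BDINFO control block in NIC SRAM: the 64-bit host DMA
 * address of the ring (high and low halves), the maxlen/flags word and,
 * on pre-5705 parts, the ring's NIC SRAM address. The block layout is
 * spelled out in the comment inside tg3_reset_hw() below.
 */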
7622 /* tp->lock is held. */
7623 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7624 dma_addr_t mapping, u32 maxlen_flags,
7625 u32 nic_addr)
7627 tg3_write_mem(tp,
7628 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7629 ((u64) mapping >> 32));
7630 tg3_write_mem(tp,
7631 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7632 ((u64) mapping & 0xffffffff));
7633 tg3_write_mem(tp,
7634 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7635 maxlen_flags);
7637 if (!tg3_flag(tp, 5705_PLUS))
7638 tg3_write_mem(tp,
7639 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7640 nic_addr);
7643 static void __tg3_set_rx_mode(struct net_device *);
7644 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7646 int i;
7648 if (!tg3_flag(tp, ENABLE_TSS)) {
7649 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7650 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7651 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7652 } else {
7653 tw32(HOSTCC_TXCOL_TICKS, 0);
7654 tw32(HOSTCC_TXMAX_FRAMES, 0);
7655 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7658 if (!tg3_flag(tp, ENABLE_RSS)) {
7659 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7660 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7661 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7662 } else {
7663 tw32(HOSTCC_RXCOL_TICKS, 0);
7664 tw32(HOSTCC_RXMAX_FRAMES, 0);
7665 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7668 if (!tg3_flag(tp, 5705_PLUS)) {
7669 u32 val = ec->stats_block_coalesce_usecs;
7671 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7672 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7674 if (!netif_carrier_ok(tp->dev))
7675 val = 0;
7677 tw32(HOSTCC_STAT_COAL_TICKS, val);
7680 for (i = 0; i < tp->irq_cnt - 1; i++) {
7681 u32 reg;
7683 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7684 tw32(reg, ec->rx_coalesce_usecs);
7685 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7686 tw32(reg, ec->rx_max_coalesced_frames);
7687 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7688 tw32(reg, ec->rx_max_coalesced_frames_irq);
7690 if (tg3_flag(tp, ENABLE_TSS)) {
7691 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7692 tw32(reg, ec->tx_coalesce_usecs);
7693 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7694 tw32(reg, ec->tx_max_coalesced_frames);
7695 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7696 tw32(reg, ec->tx_max_coalesced_frames_irq);
7700 for (; i < tp->irq_max - 1; i++) {
7701 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7702 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7703 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7705 if (tg3_flag(tp, ENABLE_TSS)) {
7706 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7707 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7708 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
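/* The per-vector host coalescing registers touched above live in banks
 * spaced 0x18 bytes apart, which is why the loops index with i * 0x18.
 */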
7713 /* tp->lock is held. */
7714 static void tg3_rings_reset(struct tg3 *tp)
7716 int i;
7717 u32 stblk, txrcb, rxrcb, limit;
7718 struct tg3_napi *tnapi = &tp->napi[0];
7720 /* Disable all transmit rings but the first. */
7721 if (!tg3_flag(tp, 5705_PLUS))
7722 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7723 else if (tg3_flag(tp, 5717_PLUS))
7724 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7725 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7726 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7727 else
7728 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7730 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7731 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7732 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7733 BDINFO_FLAGS_DISABLED);
7736 /* Disable all receive return rings but the first. */
7737 if (tg3_flag(tp, 5717_PLUS))
7738 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7739 else if (!tg3_flag(tp, 5705_PLUS))
7740 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7741 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7742 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7743 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7744 else
7745 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7747 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7748 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7749 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7750 BDINFO_FLAGS_DISABLED);
7752 /* Disable interrupts */
7753 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7755 /* Zero mailbox registers. */
7756 if (tg3_flag(tp, SUPPORT_MSIX)) {
7757 for (i = 1; i < tp->irq_max; i++) {
7758 tp->napi[i].tx_prod = 0;
7759 tp->napi[i].tx_cons = 0;
7760 if (tg3_flag(tp, ENABLE_TSS))
7761 tw32_mailbox(tp->napi[i].prodmbox, 0);
7762 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7763 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7765 if (!tg3_flag(tp, ENABLE_TSS))
7766 tw32_mailbox(tp->napi[0].prodmbox, 0);
7767 } else {
7768 tp->napi[0].tx_prod = 0;
7769 tp->napi[0].tx_cons = 0;
7770 tw32_mailbox(tp->napi[0].prodmbox, 0);
7771 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7774 /* Make sure the NIC-based send BD rings are disabled. */
7775 if (!tg3_flag(tp, 5705_PLUS)) {
7776 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7777 for (i = 0; i < 16; i++)
7778 tw32_tx_mbox(mbox + i * 8, 0);
7781 txrcb = NIC_SRAM_SEND_RCB;
7782 rxrcb = NIC_SRAM_RCV_RET_RCB;
7784 /* Clear status block in ram. */
7785 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7787 /* Set status block DMA address */
7788 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7789 ((u64) tnapi->status_mapping >> 32));
7790 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7791 ((u64) tnapi->status_mapping & 0xffffffff));
7793 if (tnapi->tx_ring) {
7794 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7795 (TG3_TX_RING_SIZE <<
7796 BDINFO_FLAGS_MAXLEN_SHIFT),
7797 NIC_SRAM_TX_BUFFER_DESC);
7798 txrcb += TG3_BDINFO_SIZE;
7801 if (tnapi->rx_rcb) {
7802 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7803 (tp->rx_ret_ring_mask + 1) <<
7804 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7805 rxrcb += TG3_BDINFO_SIZE;
7808 stblk = HOSTCC_STATBLCK_RING1;
7810 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7811 u64 mapping = (u64)tnapi->status_mapping;
7812 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7813 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7815 /* Clear status block in ram. */
7816 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7818 if (tnapi->tx_ring) {
7819 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7820 (TG3_TX_RING_SIZE <<
7821 BDINFO_FLAGS_MAXLEN_SHIFT),
7822 NIC_SRAM_TX_BUFFER_DESC);
7823 txrcb += TG3_BDINFO_SIZE;
7826 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7827 ((tp->rx_ret_ring_mask + 1) <<
7828 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7830 stblk += 8;
7831 rxrcb += TG3_BDINFO_SIZE;
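/* Choose RX buffer descriptor replenish thresholds: the smaller of what
 * the on-chip BD cache can absorb and roughly one eighth of the host
 * ring, programmed into the standard and (where applicable) jumbo
 * threshold registers, plus replenish low-water marks on 57765+ parts.
 */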
7835 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
7837 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
7839 if (!tg3_flag(tp, 5750_PLUS) ||
7840 tg3_flag(tp, 5780_CLASS) ||
7841 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7842 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7843 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
7844 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7845 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7846 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
7847 else
7848 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
7850 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
7851 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
7853 val = min(nic_rep_thresh, host_rep_thresh);
7854 tw32(RCVBDI_STD_THRESH, val);
7856 if (tg3_flag(tp, 57765_PLUS))
7857 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
7859 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7860 return;
7862 if (!tg3_flag(tp, 5705_PLUS))
7863 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
7864 else
7865 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
7867 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
7869 val = min(bdcache_maxcnt / 2, host_rep_thresh);
7870 tw32(RCVBDI_JUMBO_THRESH, val);
7872 if (tg3_flag(tp, 57765_PLUS))
7873 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
7876 /* tp->lock is held. */
7877 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7879 u32 val, rdmac_mode;
7880 int i, err, limit;
7881 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
7883 tg3_disable_ints(tp);
7885 tg3_stop_fw(tp);
7887 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7889 if (tg3_flag(tp, INIT_COMPLETE))
7890 tg3_abort_hw(tp, 1);
7892 /* Enable MAC control of LPI */
7893 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
7894 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
7895 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
7896 TG3_CPMU_EEE_LNKIDL_UART_IDL);
7898 tw32_f(TG3_CPMU_EEE_CTRL,
7899 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
7901 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
7902 TG3_CPMU_EEEMD_LPI_IN_TX |
7903 TG3_CPMU_EEEMD_LPI_IN_RX |
7904 TG3_CPMU_EEEMD_EEE_ENABLE;
7906 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7907 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
7909 if (tg3_flag(tp, ENABLE_APE))
7910 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
7912 tw32_f(TG3_CPMU_EEE_MODE, val);
7914 tw32_f(TG3_CPMU_EEE_DBTMR1,
7915 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
7916 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
7918 tw32_f(TG3_CPMU_EEE_DBTMR2,
7919 TG3_CPMU_DBTMR2_APE_TX_2047US |
7920 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
7923 if (reset_phy)
7924 tg3_phy_reset(tp);
7926 err = tg3_chip_reset(tp);
7927 if (err)
7928 return err;
7930 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7932 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7933 val = tr32(TG3_CPMU_CTRL);
7934 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7935 tw32(TG3_CPMU_CTRL, val);
7937 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7938 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7939 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7940 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7942 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7943 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7944 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7945 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7947 val = tr32(TG3_CPMU_HST_ACC);
7948 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7949 val |= CPMU_HST_ACC_MACCLK_6_25;
7950 tw32(TG3_CPMU_HST_ACC, val);
7953 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7954 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7955 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7956 PCIE_PWR_MGMT_L1_THRESH_4MS;
7957 tw32(PCIE_PWR_MGMT_THRESH, val);
7959 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7960 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7962 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7964 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7965 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7968 if (tg3_flag(tp, L1PLLPD_EN)) {
7969 u32 grc_mode = tr32(GRC_MODE);
7971 /* Access the lower 1K of PL PCIE block registers. */
7972 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7973 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7975 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
7976 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
7977 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
7979 tw32(GRC_MODE, grc_mode);
7982 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7983 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7984 u32 grc_mode = tr32(GRC_MODE);
7986 /* Access the lower 1K of PL PCIE block registers. */
7987 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7988 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7990 val = tr32(TG3_PCIE_TLDLPL_PORT +
7991 TG3_PCIE_PL_LO_PHYCTL5);
7992 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
7993 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
7995 tw32(GRC_MODE, grc_mode);
7998 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
7999 u32 grc_mode = tr32(GRC_MODE);
8001 /* Access the lower 1K of DL PCIE block registers. */
8002 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8003 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8005 val = tr32(TG3_PCIE_TLDLPL_PORT +
8006 TG3_PCIE_DL_LO_FTSMAX);
8007 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8008 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8009 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8011 tw32(GRC_MODE, grc_mode);
8014 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8015 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8016 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8017 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8020 /* This works around an issue with Athlon chipsets on
8021 * B3 tigon3 silicon. This bit has no effect on any
8022 * other revision. But do not set this on PCI Express
8023 * chips and don't even touch the clocks if the CPMU is present.
8025 if (!tg3_flag(tp, CPMU_PRESENT)) {
8026 if (!tg3_flag(tp, PCI_EXPRESS))
8027 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8028 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8031 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8032 tg3_flag(tp, PCIX_MODE)) {
8033 val = tr32(TG3PCI_PCISTATE);
8034 val |= PCISTATE_RETRY_SAME_DMA;
8035 tw32(TG3PCI_PCISTATE, val);
8038 if (tg3_flag(tp, ENABLE_APE)) {
8039 /* Allow reads and writes to the
8040 * APE register and memory space.
8042 val = tr32(TG3PCI_PCISTATE);
8043 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8044 PCISTATE_ALLOW_APE_SHMEM_WR |
8045 PCISTATE_ALLOW_APE_PSPACE_WR;
8046 tw32(TG3PCI_PCISTATE, val);
8049 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8050 /* Enable some hw fixes. */
8051 val = tr32(TG3PCI_MSI_DATA);
8052 val |= (1 << 26) | (1 << 28) | (1 << 29);
8053 tw32(TG3PCI_MSI_DATA, val);
8056 /* Descriptor ring init may make accesses to the
8057 * NIC SRAM area to set up the TX descriptors, so we
8058 * can only do this after the hardware has been
8059 * successfully reset.
8061 err = tg3_init_rings(tp);
8062 if (err)
8063 return err;
8065 if (tg3_flag(tp, 57765_PLUS)) {
8066 val = tr32(TG3PCI_DMA_RW_CTRL) &
8067 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8068 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8069 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8070 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8071 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8072 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8073 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8074 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8075 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8076 /* This value is determined during the probe time DMA
8077 * engine test, tg3_test_dma.
8079 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8082 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8083 GRC_MODE_4X_NIC_SEND_RINGS |
8084 GRC_MODE_NO_TX_PHDR_CSUM |
8085 GRC_MODE_NO_RX_PHDR_CSUM);
8086 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8088 /* Pseudo-header checksum is done by hardware logic and not
8089 * the offload processors, so make the chip do the pseudo-
8090 * header checksums on receive. For transmit it is more
8091 * convenient to do the pseudo-header checksum in software
8092 * as Linux does that on transmit for us in all cases.
8094 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8096 tw32(GRC_MODE,
8097 tp->grc_mode |
8098 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8100 /* Set up the timer prescaler register. The clock is always 66 MHz. */
8101 val = tr32(GRC_MISC_CFG);
8102 val &= ~0xff;
8103 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8104 tw32(GRC_MISC_CFG, val);
8106 /* Initialize MBUF/DESC pool. */
8107 if (tg3_flag(tp, 5750_PLUS)) {
8108 /* Do nothing. */
8109 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8110 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8111 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8112 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8113 else
8114 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8115 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8116 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8117 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8118 int fw_len;
8120 fw_len = tp->fw_len;
8121 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8122 tw32(BUFMGR_MB_POOL_ADDR,
8123 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8124 tw32(BUFMGR_MB_POOL_SIZE,
8125 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8128 if (tp->dev->mtu <= ETH_DATA_LEN) {
8129 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8130 tp->bufmgr_config.mbuf_read_dma_low_water);
8131 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8132 tp->bufmgr_config.mbuf_mac_rx_low_water);
8133 tw32(BUFMGR_MB_HIGH_WATER,
8134 tp->bufmgr_config.mbuf_high_water);
8135 } else {
8136 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8137 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8138 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8139 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8140 tw32(BUFMGR_MB_HIGH_WATER,
8141 tp->bufmgr_config.mbuf_high_water_jumbo);
8143 tw32(BUFMGR_DMA_LOW_WATER,
8144 tp->bufmgr_config.dma_low_water);
8145 tw32(BUFMGR_DMA_HIGH_WATER,
8146 tp->bufmgr_config.dma_high_water);
8148 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8149 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8150 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8151 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8152 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8153 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8154 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8155 tw32(BUFMGR_MODE, val);
8156 for (i = 0; i < 2000; i++) {
8157 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8158 break;
8159 udelay(10);
8161 if (i >= 2000) {
8162 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8163 return -ENODEV;
8166 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8167 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8169 tg3_setup_rxbd_thresholds(tp);
8171 /* Initialize TG3_BDINFO's at:
8172 * RCVDBDI_STD_BD: standard eth size rx ring
8173 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8174 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8176 * like so:
8177 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8178 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8179 * ring attribute flags
8180 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8182 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8183 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8185 * The size of each ring is fixed in the firmware, but the location is
8186 * configurable.
8188 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8189 ((u64) tpr->rx_std_mapping >> 32));
8190 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8191 ((u64) tpr->rx_std_mapping & 0xffffffff));
8192 if (!tg3_flag(tp, 5717_PLUS))
8193 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8194 NIC_SRAM_RX_BUFFER_DESC);
8196 /* Disable the mini ring */
8197 if (!tg3_flag(tp, 5705_PLUS))
8198 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8199 BDINFO_FLAGS_DISABLED);
8201 /* Program the jumbo buffer descriptor ring control
8202 * blocks on those devices that have them.
8203 */
8204 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8205 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8207 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8208 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8209 ((u64) tpr->rx_jmb_mapping >> 32));
8210 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8211 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8212 val = TG3_RX_JMB_RING_SIZE(tp) <<
8213 BDINFO_FLAGS_MAXLEN_SHIFT;
8214 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8215 val | BDINFO_FLAGS_USE_EXT_RECV);
8216 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8217 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8218 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8219 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8220 } else {
8221 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8222 BDINFO_FLAGS_DISABLED);
8223 }
8225 if (tg3_flag(tp, 57765_PLUS)) {
8226 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8227 val = TG3_RX_STD_MAX_SIZE_5700;
8228 else
8229 val = TG3_RX_STD_MAX_SIZE_5717;
8230 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8231 val |= (TG3_RX_STD_DMA_SZ << 2);
8232 } else
8233 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8234 } else
8235 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8237 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
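/* Illustrative sketch (not part of the driver): the TG3_BDINFO_* offsets
 * programmed above describe one ring control block in NIC register space.
 * Viewed as a hypothetical struct (names invented here), each block is:
 */
struct tg3_bdinfo_view {
	u32 host_addr_high;	/* TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH */
	u32 host_addr_low;	/* TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW */
	u32 maxlen_flags;	/* (rx max buffer size << 16) | ring flags */
	u32 nic_addr;		/* TG3_BDINFO_NIC_ADDR: ring's home in NIC SRAM */
};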
8239 tpr->rx_std_prod_idx = tp->rx_pending;
8240 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8242 tpr->rx_jmb_prod_idx =
8243 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8244 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8246 tg3_rings_reset(tp);
8248 /* Initialize MAC address and backoff seed. */
8249 __tg3_set_mac_addr(tp, 0);
8251 /* MTU + ethernet header + FCS + optional VLAN tag */
8252 tw32(MAC_RX_MTU_SIZE,
8253 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8255 /* The slot time is changed by tg3_setup_phy if we
8256 * run at gigabit with half duplex.
8257 */
8258 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8259 (6 << TX_LENGTHS_IPG_SHIFT) |
8260 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8262 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8263 val |= tr32(MAC_TX_LENGTHS) &
8264 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8265 TX_LENGTHS_CNT_DWN_VAL_MSK);
8267 tw32(MAC_TX_LENGTHS, val);
8269 /* Receive rules. */
8270 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8271 tw32(RCVLPC_CONFIG, 0x0181);
8273 /* Calculate RDMAC_MODE setting early, we need it to determine
8274 * the RCVLPC_STATE_ENABLE mask.
8275 */
8276 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8277 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8278 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8279 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8280 RDMAC_MODE_LNGREAD_ENAB);
8282 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8283 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8285 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8286 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8287 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8288 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8289 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8290 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8292 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8293 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8294 if (tg3_flag(tp, TSO_CAPABLE) &&
8295 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8296 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8297 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8298 !tg3_flag(tp, IS_5788)) {
8299 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8300 }
8301 }
8303 if (tg3_flag(tp, PCI_EXPRESS))
8304 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8306 if (tg3_flag(tp, HW_TSO_1) ||
8307 tg3_flag(tp, HW_TSO_2) ||
8308 tg3_flag(tp, HW_TSO_3))
8309 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8311 if (tg3_flag(tp, 57765_PLUS) ||
8312 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8313 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8314 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8316 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8317 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8319 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8320 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8321 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8322 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8323 tg3_flag(tp, 57765_PLUS)) {
8324 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8325 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8326 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8327 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8328 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8329 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8330 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8331 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8332 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8333 }
8334 tw32(TG3_RDMA_RSRVCTRL_REG,
8335 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8336 }
8338 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8339 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8340 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8341 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8342 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8343 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8344 }
8346 /* Receive/send statistics. */
8347 if (tg3_flag(tp, 5750_PLUS)) {
8348 val = tr32(RCVLPC_STATS_ENABLE);
8349 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8350 tw32(RCVLPC_STATS_ENABLE, val);
8351 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8352 tg3_flag(tp, TSO_CAPABLE)) {
8353 val = tr32(RCVLPC_STATS_ENABLE);
8354 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8355 tw32(RCVLPC_STATS_ENABLE, val);
8356 } else {
8357 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8358 }
8359 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8360 tw32(SNDDATAI_STATSENAB, 0xffffff);
8361 tw32(SNDDATAI_STATSCTRL,
8362 (SNDDATAI_SCTRL_ENABLE |
8363 SNDDATAI_SCTRL_FASTUPD));
8365 /* Setup host coalescing engine. */
8366 tw32(HOSTCC_MODE, 0);
8367 for (i = 0; i < 2000; i++) {
8368 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8369 break;
8370 udelay(10);
8371 }
8373 __tg3_set_coalesce(tp, &tp->coal);
8375 if (!tg3_flag(tp, 5705_PLUS)) {
8376 /* Status/statistics block address. See tg3_timer,
8377 * the tg3_periodic_fetch_stats call there, and
8378 * tg3_get_stats to see how this works for 5705/5750 chips.
8379 */
8380 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8381 ((u64) tp->stats_mapping >> 32));
8382 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8383 ((u64) tp->stats_mapping & 0xffffffff));
8384 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8386 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8388 /* Clear statistics and status block memory areas */
8389 for (i = NIC_SRAM_STATS_BLK;
8390 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8391 i += sizeof(u32)) {
8392 tg3_write_mem(tp, i, 0);
8393 udelay(40);
8394 }
8395 }
8397 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8399 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8400 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8401 if (!tg3_flag(tp, 5705_PLUS))
8402 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8404 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8405 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8406 /* reset to prevent losing 1st rx packet intermittently */
8407 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8408 udelay(10);
8409 }
8411 if (tg3_flag(tp, ENABLE_APE))
8412 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8413 else
8414 tp->mac_mode = 0;
8415 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8416 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8417 if (!tg3_flag(tp, 5705_PLUS) &&
8418 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8419 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8420 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8421 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8422 udelay(40);
8424 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8425 * If TG3_FLAG_IS_NIC is zero, we should read the
8426 * register to preserve the GPIO settings for LOMs. The GPIOs,
8427 * whether used as inputs or outputs, are set by boot code after
8428 * reset.
8429 */
8430 if (!tg3_flag(tp, IS_NIC)) {
8431 u32 gpio_mask;
8433 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8434 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8435 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8437 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8438 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8439 GRC_LCLCTRL_GPIO_OUTPUT3;
8441 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8442 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8444 tp->grc_local_ctrl &= ~gpio_mask;
8445 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8446 }
8447 /* GPIO1 must be driven high for eeprom write protect */
8448 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8449 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8450 GRC_LCLCTRL_GPIO_OUTPUT1);
8452 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8453 udelay(100);
8455 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8456 val = tr32(MSGINT_MODE);
8457 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8458 tw32(MSGINT_MODE, val);
8459 }
8461 if (!tg3_flag(tp, 5705_PLUS)) {
8462 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8463 udelay(40);
8464 }
8466 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8467 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8468 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8469 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8470 WDMAC_MODE_LNGREAD_ENAB);
8472 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8473 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8474 if (tg3_flag(tp, TSO_CAPABLE) &&
8475 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8476 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8477 /* nothing */
8478 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8479 !tg3_flag(tp, IS_5788)) {
8480 val |= WDMAC_MODE_RX_ACCEL;
8481 }
8482 }
8484 /* Enable host coalescing bug fix */
8485 if (tg3_flag(tp, 5755_PLUS))
8486 val |= WDMAC_MODE_STATUS_TAG_FIX;
8488 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8489 val |= WDMAC_MODE_BURST_ALL_DATA;
8491 tw32_f(WDMAC_MODE, val);
8492 udelay(40);
8494 if (tg3_flag(tp, PCIX_MODE)) {
8495 u16 pcix_cmd;
8497 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8498 &pcix_cmd);
8499 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8500 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8501 pcix_cmd |= PCI_X_CMD_READ_2K;
8502 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8503 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8504 pcix_cmd |= PCI_X_CMD_READ_2K;
8505 }
8506 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8507 pcix_cmd);
8508 }
8510 tw32_f(RDMAC_MODE, rdmac_mode);
8511 udelay(40);
8513 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8514 if (!tg3_flag(tp, 5705_PLUS))
8515 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8517 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8518 tw32(SNDDATAC_MODE,
8519 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8520 else
8521 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8523 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8524 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8525 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8526 if (tg3_flag(tp, LRG_PROD_RING_CAP))
8527 val |= RCVDBDI_MODE_LRG_RING_SZ;
8528 tw32(RCVDBDI_MODE, val);
8529 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8530 if (tg3_flag(tp, HW_TSO_1) ||
8531 tg3_flag(tp, HW_TSO_2) ||
8532 tg3_flag(tp, HW_TSO_3))
8533 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8534 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8535 if (tg3_flag(tp, ENABLE_TSS))
8536 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8537 tw32(SNDBDI_MODE, val);
8538 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8540 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8541 err = tg3_load_5701_a0_firmware_fix(tp);
8542 if (err)
8543 return err;
8544 }
8546 if (tg3_flag(tp, TSO_CAPABLE)) {
8547 err = tg3_load_tso_firmware(tp);
8548 if (err)
8549 return err;
8550 }
8552 tp->tx_mode = TX_MODE_ENABLE;
8554 if (tg3_flag(tp, 5755_PLUS) ||
8555 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8556 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8558 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8559 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8560 tp->tx_mode &= ~val;
8561 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8562 }
8564 tw32_f(MAC_TX_MODE, tp->tx_mode);
8565 udelay(100);
8567 if (tg3_flag(tp, ENABLE_RSS)) {
8568 u32 reg = MAC_RSS_INDIR_TBL_0;
8569 u8 *ent = (u8 *)&val;
8571 /* Setup the indirection table */
8572 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8573 int idx = i % sizeof(val);
8575 ent[idx] = i % (tp->irq_cnt - 1);
8576 if (idx == sizeof(val) - 1) {
8577 tw32(reg, val);
8578 reg += 4;
8579 }
8580 }
8582 /* Setup the "secret" hash key. */
8583 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8584 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8585 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8586 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8587 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8588 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8589 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8590 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8591 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8592 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8593 }
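/* Illustrative sketch (not part of the driver): with tp->irq_cnt == 4
 * (one MSI-X vector reserved for link events, three rx rings), the loop
 * above fills the TG3_RSS_INDIR_TBL_SIZE hash-bucket entries with the
 * repeating pattern 0,1,2,0,1,2,..., packed four entries per 32-bit
 * register. The same mapping as a plain helper:
 */
static void rss_indir_fill_example(u8 *tbl, int nents, int nrxq)
{
	int i;

	for (i = 0; i < nents; i++)
		tbl[i] = i % nrxq;	/* mirrors i % (tp->irq_cnt - 1) */
}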
8595 tp->rx_mode = RX_MODE_ENABLE;
8596 if (tg3_flag(tp, 5755_PLUS))
8597 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8599 if (tg3_flag(tp, ENABLE_RSS))
8600 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8601 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8602 RX_MODE_RSS_IPV6_HASH_EN |
8603 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8604 RX_MODE_RSS_IPV4_HASH_EN |
8605 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8607 tw32_f(MAC_RX_MODE, tp->rx_mode);
8608 udelay(10);
8610 tw32(MAC_LED_CTRL, tp->led_ctrl);
8612 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8613 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8614 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8615 udelay(10);
8616 }
8617 tw32_f(MAC_RX_MODE, tp->rx_mode);
8618 udelay(10);
8620 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8621 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8622 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8623 /* Set drive transmission level to 1.2V */
8624 /* only if the signal pre-emphasis bit is not set */
8625 val = tr32(MAC_SERDES_CFG);
8626 val &= 0xfffff000;
8627 val |= 0x880;
8628 tw32(MAC_SERDES_CFG, val);
8629 }
8630 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8631 tw32(MAC_SERDES_CFG, 0x616000);
8632 }
8634 /* Prevent chip from dropping frames when flow control
8635 * is enabled.
8636 */
8637 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8638 val = 1;
8639 else
8640 val = 2;
8641 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8643 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8644 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8645 /* Use hardware link auto-negotiation */
8646 tg3_flag_set(tp, HW_AUTONEG);
8647 }
8649 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8650 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
8651 u32 tmp;
8653 tmp = tr32(SERDES_RX_CTRL);
8654 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8655 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8656 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8657 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8658 }
8660 if (!tg3_flag(tp, USE_PHYLIB)) {
8661 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8662 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8663 tp->link_config.speed = tp->link_config.orig_speed;
8664 tp->link_config.duplex = tp->link_config.orig_duplex;
8665 tp->link_config.autoneg = tp->link_config.orig_autoneg;
8666 }
8668 err = tg3_setup_phy(tp, 0);
8669 if (err)
8670 return err;
8672 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8673 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8674 u32 tmp;
8676 /* Clear CRC stats. */
8677 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8678 tg3_writephy(tp, MII_TG3_TEST1,
8679 tmp | MII_TG3_TEST1_CRC_EN);
8680 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8681 }
8682 }
8683 }
8685 __tg3_set_rx_mode(tp->dev);
8687 /* Initialize receive rules. */
8688 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8689 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8690 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8691 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8693 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8694 limit = 8;
8695 else
8696 limit = 16;
8697 if (tg3_flag(tp, ENABLE_ASF))
8698 limit -= 4;
8699 switch (limit) {
8700 case 16:
8701 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
8702 case 15:
8703 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
8704 case 14:
8705 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
8706 case 13:
8707 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
8708 case 12:
8709 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
8710 case 11:
8711 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
8712 case 10:
8713 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
8714 case 9:
8715 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
8716 case 8:
8717 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
8718 case 7:
8719 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
8720 case 6:
8721 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
8722 case 5:
8723 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
8724 case 4:
8725 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8726 case 3:
8727 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8728 case 2:
8729 case 1:
8731 default:
8732 break;
8733 }
8735 if (tg3_flag(tp, ENABLE_APE))
8736 /* Write our heartbeat update interval to APE. */
8737 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8738 APE_HOST_HEARTBEAT_INT_DISABLE);
8740 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8742 return 0;
8743 }
8745 /* Called at device open time to get the chip ready for
8746 * packet processing. Invoked with tp->lock held.
8747 */
8748 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8749 {
8750 tg3_switch_clocks(tp);
8752 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8754 return tg3_reset_hw(tp, reset_phy);
8755 }
8757 #define TG3_STAT_ADD32(PSTAT, REG) \
8758 do { u32 __val = tr32(REG); \
8759 (PSTAT)->low += __val; \
8760 if ((PSTAT)->low < __val) \
8761 (PSTAT)->high += 1; \
8762 } while (0)
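/* Illustrative sketch (not part of the driver): TG3_STAT_ADD32 folds a
 * 32-bit hardware counter reading into a 64-bit {high, low} software
 * total. The unsigned compare after the add detects wraparound of the
 * low word: if the sum ended up smaller than the value just added, the
 * addition carried, so the high word is bumped. The same logic with a
 * made-up pair type:
 */
struct stat64_pair_example { u32 high, low; };

static inline void stat_add32_example(struct stat64_pair_example *st, u32 val)
{
	st->low += val;
	if (st->low < val)	/* low word wrapped: carry into high word */
		st->high += 1;
}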
8764 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8765 {
8766 struct tg3_hw_stats *sp = tp->hw_stats;
8768 if (!netif_carrier_ok(tp->dev))
8769 return;
8771 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8772 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8773 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8774 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8775 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8776 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8777 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8778 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8779 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8780 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8781 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8782 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8783 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8785 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8786 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8787 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8788 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8789 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8790 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8791 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8792 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8793 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8794 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8795 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8796 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8797 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8798 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8800 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8801 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
8802 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
8803 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
8804 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8805 } else {
8806 u32 val = tr32(HOSTCC_FLOW_ATTN);
8807 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
8808 if (val) {
8809 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
8810 sp->rx_discards.low += val;
8811 if (sp->rx_discards.low < val)
8812 sp->rx_discards.high += 1;
8813 }
8814 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
8815 }
8816 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8817 }
8819 static void tg3_timer(unsigned long __opaque)
8820 {
8821 struct tg3 *tp = (struct tg3 *) __opaque;
8823 if (tp->irq_sync)
8824 goto restart_timer;
8826 spin_lock(&tp->lock);
8828 if (!tg3_flag(tp, TAGGED_STATUS)) {
8829 /* All of this garbage is because when using non-tagged
8830 * IRQ status the mailbox/status_block protocol the chip
8831 * uses with the cpu is race prone.
8832 */
8833 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8834 tw32(GRC_LOCAL_CTRL,
8835 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8836 } else {
8837 tw32(HOSTCC_MODE, tp->coalesce_mode |
8838 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
8839 }
8841 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8842 tg3_flag_set(tp, RESTART_TIMER);
8843 spin_unlock(&tp->lock);
8844 schedule_work(&tp->reset_task);
8845 return;
8846 }
8847 }
8849 /* This part only runs once per second. */
8850 if (!--tp->timer_counter) {
8851 if (tg3_flag(tp, 5705_PLUS))
8852 tg3_periodic_fetch_stats(tp);
8854 if (tp->setlpicnt && !--tp->setlpicnt)
8855 tg3_phy_eee_enable(tp);
8857 if (tg3_flag(tp, USE_LINKCHG_REG)) {
8858 u32 mac_stat;
8859 int phy_event;
8861 mac_stat = tr32(MAC_STATUS);
8863 phy_event = 0;
8864 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
8865 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8866 phy_event = 1;
8867 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8868 phy_event = 1;
8870 if (phy_event)
8871 tg3_setup_phy(tp, 0);
8872 } else if (tg3_flag(tp, POLL_SERDES)) {
8873 u32 mac_stat = tr32(MAC_STATUS);
8874 int need_setup = 0;
8876 if (netif_carrier_ok(tp->dev) &&
8877 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8878 need_setup = 1;
8879 }
8880 if (!netif_carrier_ok(tp->dev) &&
8881 (mac_stat & (MAC_STATUS_PCS_SYNCED |
8882 MAC_STATUS_SIGNAL_DET))) {
8883 need_setup = 1;
8884 }
8885 if (need_setup) {
8886 if (!tp->serdes_counter) {
8887 tw32_f(MAC_MODE,
8888 (tp->mac_mode &
8889 ~MAC_MODE_PORT_MODE_MASK));
8890 udelay(40);
8891 tw32_f(MAC_MODE, tp->mac_mode);
8892 udelay(40);
8893 }
8894 tg3_setup_phy(tp, 0);
8895 }
8896 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8897 tg3_flag(tp, 5780_CLASS)) {
8898 tg3_serdes_parallel_detect(tp);
8899 }
8901 tp->timer_counter = tp->timer_multiplier;
8902 }
8904 /* Heartbeat is only sent once every 2 seconds.
8905 *
8906 * The heartbeat is to tell the ASF firmware that the host
8907 * driver is still alive. In the event that the OS crashes,
8908 * ASF needs to reset the hardware to free up the FIFO space
8909 * that may be filled with rx packets destined for the host.
8910 * If the FIFO is full, ASF will no longer function properly.
8911 *
8912 * Unintended resets have been reported on real time kernels
8913 * where the timer doesn't run on time. Netpoll will also have
8914 * the same problem.
8915 *
8916 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8917 * to check the ring condition when the heartbeat is expiring
8918 * before doing the reset. This will prevent most unintended
8919 * resets.
8920 */
8921 if (!--tp->asf_counter) {
8922 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
8923 tg3_wait_for_event_ack(tp);
8925 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8926 FWCMD_NICDRV_ALIVE3);
8927 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8928 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
8929 TG3_FW_UPDATE_TIMEOUT_SEC);
8931 tg3_generate_fw_event(tp);
8932 }
8933 tp->asf_counter = tp->asf_multiplier;
8934 }
8936 spin_unlock(&tp->lock);
8938 restart_timer:
8939 tp->timer.expires = jiffies + tp->timer_offset;
8940 add_timer(&tp->timer);
8941 }
8943 static int tg3_request_irq(struct tg3 *tp, int irq_num)
8944 {
8945 irq_handler_t fn;
8946 unsigned long flags;
8947 char *name;
8948 struct tg3_napi *tnapi = &tp->napi[irq_num];
8950 if (tp->irq_cnt == 1)
8951 name = tp->dev->name;
8952 else {
8953 name = &tnapi->irq_lbl[0];
8954 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
8955 name[IFNAMSIZ-1] = 0;
8956 }
8958 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
8959 fn = tg3_msi;
8960 if (tg3_flag(tp, 1SHOT_MSI))
8961 fn = tg3_msi_1shot;
8962 flags = 0;
8963 } else {
8964 fn = tg3_interrupt;
8965 if (tg3_flag(tp, TAGGED_STATUS))
8966 fn = tg3_interrupt_tagged;
8967 flags = IRQF_SHARED;
8968 }
8970 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
8971 }
8973 static int tg3_test_interrupt(struct tg3 *tp)
8974 {
8975 struct tg3_napi *tnapi = &tp->napi[0];
8976 struct net_device *dev = tp->dev;
8977 int err, i, intr_ok = 0;
8978 u32 val;
8980 if (!netif_running(dev))
8981 return -ENODEV;
8983 tg3_disable_ints(tp);
8985 free_irq(tnapi->irq_vec, tnapi);
8987 /*
8988 * Turn off MSI one shot mode. Otherwise this test has no
8989 * observable way to know whether the interrupt was delivered.
8990 */
8991 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
8992 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
8993 tw32(MSGINT_MODE, val);
8994 }
8996 err = request_irq(tnapi->irq_vec, tg3_test_isr,
8997 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
8998 if (err)
8999 return err;
9001 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9002 tg3_enable_ints(tp);
9004 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9005 tnapi->coal_now);
9007 for (i = 0; i < 5; i++) {
9008 u32 int_mbox, misc_host_ctrl;
9010 int_mbox = tr32_mailbox(tnapi->int_mbox);
9011 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9013 if ((int_mbox != 0) ||
9014 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9015 intr_ok = 1;
9016 break;
9017 }
9019 msleep(10);
9020 }
9022 tg3_disable_ints(tp);
9024 free_irq(tnapi->irq_vec, tnapi);
9026 err = tg3_request_irq(tp, 0);
9028 if (err)
9029 return err;
9031 if (intr_ok) {
9032 /* Reenable MSI one shot mode. */
9033 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9034 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9035 tw32(MSGINT_MODE, val);
9036 }
9037 return 0;
9038 }
9040 return -EIO;
9041 }
9043 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
9044 * successfully restored
9045 */
9046 static int tg3_test_msi(struct tg3 *tp)
9047 {
9048 int err;
9049 u16 pci_cmd;
9051 if (!tg3_flag(tp, USING_MSI))
9052 return 0;
9054 /* Turn off SERR reporting in case MSI terminates with Master
9055 * Abort.
9056 */
9057 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9058 pci_write_config_word(tp->pdev, PCI_COMMAND,
9059 pci_cmd & ~PCI_COMMAND_SERR);
9061 err = tg3_test_interrupt(tp);
9063 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9065 if (!err)
9066 return 0;
9068 /* other failures */
9069 if (err != -EIO)
9070 return err;
9072 /* MSI test failed, go back to INTx mode */
9073 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9074 "to INTx mode. Please report this failure to the PCI "
9075 "maintainer and include system chipset information\n");
9077 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9079 pci_disable_msi(tp->pdev);
9081 tg3_flag_clear(tp, USING_MSI);
9082 tp->napi[0].irq_vec = tp->pdev->irq;
9084 err = tg3_request_irq(tp, 0);
9085 if (err)
9086 return err;
9088 /* Need to reset the chip because the MSI cycle may have terminated
9089 * with Master Abort.
9090 */
9091 tg3_full_lock(tp, 1);
9093 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9094 err = tg3_init_hw(tp, 1);
9096 tg3_full_unlock(tp);
9098 if (err)
9099 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9101 return err;
9102 }
9104 static int tg3_request_firmware(struct tg3 *tp)
9105 {
9106 const __be32 *fw_data;
9108 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9109 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9110 tp->fw_needed);
9111 return -ENOENT;
9112 }
9114 fw_data = (void *)tp->fw->data;
9116 /* Firmware blob starts with version numbers, followed by
9117 * start address and _full_ length including BSS sections
9118 * (which must be longer than the actual data, of course).
9119 */
9121 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
9122 if (tp->fw_len < (tp->fw->size - 12)) {
9123 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9124 tp->fw_len, tp->fw_needed);
9125 release_firmware(tp->fw);
9126 tp->fw = NULL;
9127 return -EINVAL;
9128 }
9130 /* We no longer need firmware; we have it. */
9131 tp->fw_needed = NULL;
9132 return 0;
9133 }
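/* Illustrative sketch (not part of the driver): the length check above
 * assumes the blob starts with three big-endian words ahead of the
 * payload, roughly the following layout (field names invented here):
 */
struct tg3_fw_header_view {
	__be32 version;		/* fw_data[0] */
	__be32 start_addr;	/* fw_data[1] */
	__be32 full_len;	/* fw_data[2]: data plus BSS, so it may
				 * legitimately exceed tp->fw->size - 12 */
};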
9135 static bool tg3_enable_msix(struct tg3 *tp)
9136 {
9137 int i, rc, cpus = num_online_cpus();
9138 struct msix_entry msix_ent[tp->irq_max];
9140 if (cpus == 1)
9141 /* Just fallback to the simpler MSI mode. */
9142 return false;
9144 /*
9145 * We want as many rx rings enabled as there are cpus.
9146 * The first MSIX vector only deals with link interrupts, etc,
9147 * so we add one to the number of vectors we are requesting.
9148 */
9149 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9151 for (i = 0; i < tp->irq_max; i++) {
9152 msix_ent[i].entry = i;
9153 msix_ent[i].vector = 0;
9154 }
9156 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9157 if (rc < 0) {
9158 return false;
9159 } else if (rc != 0) {
9160 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9161 return false;
9162 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9163 tp->irq_cnt, rc);
9164 tp->irq_cnt = rc;
9165 }
9167 for (i = 0; i < tp->irq_max; i++)
9168 tp->napi[i].irq_vec = msix_ent[i].vector;
9170 netif_set_real_num_tx_queues(tp->dev, 1);
9171 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9172 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9173 pci_disable_msix(tp->pdev);
9174 return false;
9175 }
9177 if (tp->irq_cnt > 1) {
9178 tg3_flag_set(tp, ENABLE_RSS);
9180 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9181 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9182 tg3_flag_set(tp, ENABLE_TSS);
9183 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9184 }
9185 }
9187 return true;
9188 }
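/* Illustrative sketch (not part of the driver): the pci_enable_msix()
 * call used above returns 0 on success, a negative errno on failure,
 * or, when fewer vectors are available, a positive count to retry
 * with; tg3_enable_msix() performs exactly one such retry. A minimal
 * standalone form of that contract (single retry only):
 */
static int msix_enable_with_retry_example(struct pci_dev *pdev,
					  struct msix_entry *ent, int want)
{
	int rc = pci_enable_msix(pdev, ent, want);

	if (rc > 0)		/* only rc vectors available: retry once */
		return pci_enable_msix(pdev, ent, rc) ? -ENODEV : rc;
	return rc ? rc : want;	/* negative errno, or full allocation */
}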
9190 static void tg3_ints_init(struct tg3 *tp)
9191 {
9192 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9193 !tg3_flag(tp, TAGGED_STATUS)) {
9194 /* All MSI supporting chips should support tagged
9195 * status. Assert that this is the case.
9196 */
9197 netdev_warn(tp->dev,
9198 "MSI without TAGGED_STATUS? Not using MSI\n");
9199 goto defcfg;
9200 }
9202 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9203 tg3_flag_set(tp, USING_MSIX);
9204 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9205 tg3_flag_set(tp, USING_MSI);
9207 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9208 u32 msi_mode = tr32(MSGINT_MODE);
9209 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9210 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9211 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9212 }
9213 defcfg:
9214 if (!tg3_flag(tp, USING_MSIX)) {
9215 tp->irq_cnt = 1;
9216 tp->napi[0].irq_vec = tp->pdev->irq;
9217 netif_set_real_num_tx_queues(tp->dev, 1);
9218 netif_set_real_num_rx_queues(tp->dev, 1);
9219 }
9220 }
9222 static void tg3_ints_fini(struct tg3 *tp)
9223 {
9224 if (tg3_flag(tp, USING_MSIX))
9225 pci_disable_msix(tp->pdev);
9226 else if (tg3_flag(tp, USING_MSI))
9227 pci_disable_msi(tp->pdev);
9228 tg3_flag_clear(tp, USING_MSI);
9229 tg3_flag_clear(tp, USING_MSIX);
9230 tg3_flag_clear(tp, ENABLE_RSS);
9231 tg3_flag_clear(tp, ENABLE_TSS);
9232 }
9234 static int tg3_open(struct net_device *dev)
9235 {
9236 struct tg3 *tp = netdev_priv(dev);
9237 int i, err;
9239 if (tp->fw_needed) {
9240 err = tg3_request_firmware(tp);
9241 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9242 if (err)
9243 return err;
9244 } else if (err) {
9245 netdev_warn(tp->dev, "TSO capability disabled\n");
9246 tg3_flag_clear(tp, TSO_CAPABLE);
9247 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9248 netdev_notice(tp->dev, "TSO capability restored\n");
9249 tg3_flag_set(tp, TSO_CAPABLE);
9250 }
9251 }
9253 netif_carrier_off(tp->dev);
9255 err = tg3_power_up(tp);
9256 if (err)
9257 return err;
9259 tg3_full_lock(tp, 0);
9261 tg3_disable_ints(tp);
9262 tg3_flag_clear(tp, INIT_COMPLETE);
9264 tg3_full_unlock(tp);
9266 /*
9267 * Setup interrupts first so we know how
9268 * many NAPI resources to allocate
9269 */
9270 tg3_ints_init(tp);
9272 /* The placement of this call is tied
9273 * to the setup and use of Host TX descriptors.
9274 */
9275 err = tg3_alloc_consistent(tp);
9276 if (err)
9277 goto err_out1;
9279 tg3_napi_init(tp);
9281 tg3_napi_enable(tp);
9283 for (i = 0; i < tp->irq_cnt; i++) {
9284 struct tg3_napi *tnapi = &tp->napi[i];
9285 err = tg3_request_irq(tp, i);
9286 if (err) {
9287 for (i--; i >= 0; i--)
9288 free_irq(tnapi->irq_vec, tnapi);
9289 break;
9290 }
9291 }
9293 if (err)
9294 goto err_out2;
9296 tg3_full_lock(tp, 0);
9298 err = tg3_init_hw(tp, 1);
9299 if (err) {
9300 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9301 tg3_free_rings(tp);
9302 } else {
9303 if (tg3_flag(tp, TAGGED_STATUS))
9304 tp->timer_offset = HZ;
9305 else
9306 tp->timer_offset = HZ / 10;
9308 BUG_ON(tp->timer_offset > HZ);
9309 tp->timer_counter = tp->timer_multiplier =
9310 (HZ / tp->timer_offset);
9311 tp->asf_counter = tp->asf_multiplier =
9312 ((HZ / tp->timer_offset) * 2);
9314 init_timer(&tp->timer);
9315 tp->timer.expires = jiffies + tp->timer_offset;
9316 tp->timer.data = (unsigned long) tp;
9317 tp->timer.function = tg3_timer;
9318 }
9320 tg3_full_unlock(tp);
9322 if (err)
9323 goto err_out3;
9325 if (tg3_flag(tp, USING_MSI)) {
9326 err = tg3_test_msi(tp);
9328 if (err) {
9329 tg3_full_lock(tp, 0);
9330 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9331 tg3_free_rings(tp);
9332 tg3_full_unlock(tp);
9334 goto err_out2;
9335 }
9336 }
9337 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9338 u32 val = tr32(PCIE_TRANSACTION_CFG);
9340 tw32(PCIE_TRANSACTION_CFG,
9341 val | PCIE_TRANS_CFG_1SHOT_MSI);
9342 }
9345 tg3_phy_start(tp);
9347 tg3_full_lock(tp, 0);
9349 add_timer(&tp->timer);
9350 tg3_flag_set(tp, INIT_COMPLETE);
9351 tg3_enable_ints(tp);
9353 tg3_full_unlock(tp);
9355 netif_tx_start_all_queues(dev);
9357 /*
9358 * Reset loopback feature if it was turned on while the device was down;
9359 * make sure that it's installed properly now.
9360 */
9361 if (dev->features & NETIF_F_LOOPBACK)
9362 tg3_set_loopback(dev, dev->features);
9364 return 0;
9366 err_out3:
9367 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9368 struct tg3_napi *tnapi = &tp->napi[i];
9369 free_irq(tnapi->irq_vec, tnapi);
9370 }
9372 err_out2:
9373 tg3_napi_disable(tp);
9374 tg3_napi_fini(tp);
9375 tg3_free_consistent(tp);
9377 err_out1:
9378 tg3_ints_fini(tp);
9379 return err;
9380 }
9382 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9383 struct rtnl_link_stats64 *);
9384 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9386 static int tg3_close(struct net_device *dev)
9387 {
9388 int i;
9389 struct tg3 *tp = netdev_priv(dev);
9391 tg3_napi_disable(tp);
9392 cancel_work_sync(&tp->reset_task);
9394 netif_tx_stop_all_queues(dev);
9396 del_timer_sync(&tp->timer);
9398 tg3_phy_stop(tp);
9400 tg3_full_lock(tp, 1);
9402 tg3_disable_ints(tp);
9404 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9405 tg3_free_rings(tp);
9406 tg3_flag_clear(tp, INIT_COMPLETE);
9408 tg3_full_unlock(tp);
9410 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9411 struct tg3_napi *tnapi = &tp->napi[i];
9412 free_irq(tnapi->irq_vec, tnapi);
9413 }
9415 tg3_ints_fini(tp);
9417 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9419 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9420 sizeof(tp->estats_prev));
9422 tg3_napi_fini(tp);
9424 tg3_free_consistent(tp);
9426 tg3_power_down(tp);
9428 netif_carrier_off(tp->dev);
9430 return 0;
9431 }
9433 static inline u64 get_stat64(tg3_stat64_t *val)
9434 {
9435 return ((u64)val->high << 32) | ((u64)val->low);
9436 }
9438 static u64 calc_crc_errors(struct tg3 *tp)
9439 {
9440 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9442 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9443 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9444 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9445 u32 val;
9447 spin_lock_bh(&tp->lock);
9448 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9449 tg3_writephy(tp, MII_TG3_TEST1,
9450 val | MII_TG3_TEST1_CRC_EN);
9451 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9452 } else
9453 val = 0;
9454 spin_unlock_bh(&tp->lock);
9456 tp->phy_crc_errors += val;
9458 return tp->phy_crc_errors;
9459 }
9461 return get_stat64(&hw_stats->rx_fcs_errors);
9462 }
9464 #define ESTAT_ADD(member) \
9465 estats->member = old_estats->member + \
9466 get_stat64(&hw_stats->member)
9468 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9469 {
9470 struct tg3_ethtool_stats *estats = &tp->estats;
9471 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9472 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9474 if (!hw_stats)
9475 return old_estats;
9477 ESTAT_ADD(rx_octets);
9478 ESTAT_ADD(rx_fragments);
9479 ESTAT_ADD(rx_ucast_packets);
9480 ESTAT_ADD(rx_mcast_packets);
9481 ESTAT_ADD(rx_bcast_packets);
9482 ESTAT_ADD(rx_fcs_errors);
9483 ESTAT_ADD(rx_align_errors);
9484 ESTAT_ADD(rx_xon_pause_rcvd);
9485 ESTAT_ADD(rx_xoff_pause_rcvd);
9486 ESTAT_ADD(rx_mac_ctrl_rcvd);
9487 ESTAT_ADD(rx_xoff_entered);
9488 ESTAT_ADD(rx_frame_too_long_errors);
9489 ESTAT_ADD(rx_jabbers);
9490 ESTAT_ADD(rx_undersize_packets);
9491 ESTAT_ADD(rx_in_length_errors);
9492 ESTAT_ADD(rx_out_length_errors);
9493 ESTAT_ADD(rx_64_or_less_octet_packets);
9494 ESTAT_ADD(rx_65_to_127_octet_packets);
9495 ESTAT_ADD(rx_128_to_255_octet_packets);
9496 ESTAT_ADD(rx_256_to_511_octet_packets);
9497 ESTAT_ADD(rx_512_to_1023_octet_packets);
9498 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9499 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9500 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9501 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9502 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9504 ESTAT_ADD(tx_octets);
9505 ESTAT_ADD(tx_collisions);
9506 ESTAT_ADD(tx_xon_sent);
9507 ESTAT_ADD(tx_xoff_sent);
9508 ESTAT_ADD(tx_flow_control);
9509 ESTAT_ADD(tx_mac_errors);
9510 ESTAT_ADD(tx_single_collisions);
9511 ESTAT_ADD(tx_mult_collisions);
9512 ESTAT_ADD(tx_deferred);
9513 ESTAT_ADD(tx_excessive_collisions);
9514 ESTAT_ADD(tx_late_collisions);
9515 ESTAT_ADD(tx_collide_2times);
9516 ESTAT_ADD(tx_collide_3times);
9517 ESTAT_ADD(tx_collide_4times);
9518 ESTAT_ADD(tx_collide_5times);
9519 ESTAT_ADD(tx_collide_6times);
9520 ESTAT_ADD(tx_collide_7times);
9521 ESTAT_ADD(tx_collide_8times);
9522 ESTAT_ADD(tx_collide_9times);
9523 ESTAT_ADD(tx_collide_10times);
9524 ESTAT_ADD(tx_collide_11times);
9525 ESTAT_ADD(tx_collide_12times);
9526 ESTAT_ADD(tx_collide_13times);
9527 ESTAT_ADD(tx_collide_14times);
9528 ESTAT_ADD(tx_collide_15times);
9529 ESTAT_ADD(tx_ucast_packets);
9530 ESTAT_ADD(tx_mcast_packets);
9531 ESTAT_ADD(tx_bcast_packets);
9532 ESTAT_ADD(tx_carrier_sense_errors);
9533 ESTAT_ADD(tx_discards);
9534 ESTAT_ADD(tx_errors);
9536 ESTAT_ADD(dma_writeq_full);
9537 ESTAT_ADD(dma_write_prioq_full);
9538 ESTAT_ADD(rxbds_empty);
9539 ESTAT_ADD(rx_discards);
9540 ESTAT_ADD(rx_errors);
9541 ESTAT_ADD(rx_threshold_hit);
9543 ESTAT_ADD(dma_readq_full);
9544 ESTAT_ADD(dma_read_prioq_full);
9545 ESTAT_ADD(tx_comp_queue_full);
9547 ESTAT_ADD(ring_set_send_prod_index);
9548 ESTAT_ADD(ring_status_update);
9549 ESTAT_ADD(nic_irqs);
9550 ESTAT_ADD(nic_avoided_irqs);
9551 ESTAT_ADD(nic_tx_threshold_hit);
9553 ESTAT_ADD(mbuf_lwm_thresh_hit);
9555 return estats;
9556 }
9558 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9559 struct rtnl_link_stats64 *stats)
9560 {
9561 struct tg3 *tp = netdev_priv(dev);
9562 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9563 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9565 if (!hw_stats)
9566 return old_stats;
9568 stats->rx_packets = old_stats->rx_packets +
9569 get_stat64(&hw_stats->rx_ucast_packets) +
9570 get_stat64(&hw_stats->rx_mcast_packets) +
9571 get_stat64(&hw_stats->rx_bcast_packets);
9573 stats->tx_packets = old_stats->tx_packets +
9574 get_stat64(&hw_stats->tx_ucast_packets) +
9575 get_stat64(&hw_stats->tx_mcast_packets) +
9576 get_stat64(&hw_stats->tx_bcast_packets);
9578 stats->rx_bytes = old_stats->rx_bytes +
9579 get_stat64(&hw_stats->rx_octets);
9580 stats->tx_bytes = old_stats->tx_bytes +
9581 get_stat64(&hw_stats->tx_octets);
9583 stats->rx_errors = old_stats->rx_errors +
9584 get_stat64(&hw_stats->rx_errors);
9585 stats->tx_errors = old_stats->tx_errors +
9586 get_stat64(&hw_stats->tx_errors) +
9587 get_stat64(&hw_stats->tx_mac_errors) +
9588 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9589 get_stat64(&hw_stats->tx_discards);
9591 stats->multicast = old_stats->multicast +
9592 get_stat64(&hw_stats->rx_mcast_packets);
9593 stats->collisions = old_stats->collisions +
9594 get_stat64(&hw_stats->tx_collisions);
9596 stats->rx_length_errors = old_stats->rx_length_errors +
9597 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9598 get_stat64(&hw_stats->rx_undersize_packets);
9600 stats->rx_over_errors = old_stats->rx_over_errors +
9601 get_stat64(&hw_stats->rxbds_empty);
9602 stats->rx_frame_errors = old_stats->rx_frame_errors +
9603 get_stat64(&hw_stats->rx_align_errors);
9604 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9605 get_stat64(&hw_stats->tx_discards);
9606 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9607 get_stat64(&hw_stats->tx_carrier_sense_errors);
9609 stats->rx_crc_errors = old_stats->rx_crc_errors +
9610 calc_crc_errors(tp);
9612 stats->rx_missed_errors = old_stats->rx_missed_errors +
9613 get_stat64(&hw_stats->rx_discards);
9615 stats->rx_dropped = tp->rx_dropped;
9617 return stats;
9618 }
9620 static inline u32 calc_crc(unsigned char *buf, int len)
9621 {
9622 u32 reg;
9623 u32 tmp;
9624 int j, k;
9626 reg = 0xffffffff;
9628 for (j = 0; j < len; j++) {
9629 reg ^= buf[j];
9631 for (k = 0; k < 8; k++) {
9632 tmp = reg & 0x01;
9634 reg >>= 1;
9636 if (tmp)
9637 reg ^= 0xedb88320;
9638 }
9639 }
9641 return ~reg;
9642 }
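/* Worked example (illustrative, not part of the driver): in
 * __tg3_set_rx_mode() below, the top bits of this CRC select one of
 * 128 hash-filter positions. If ~crc & 0x7f works out to 0x6b, then
 * regidx = (0x6b & 0x60) >> 5 = 3 (MAC_HASH_REG_3) and
 * bit = 0x6b & 0x1f = 11, i.e. bit 11 of that register:
 */
static u32 mc_hash_pos_example(const u8 *addr)
{
	u32 bit = ~calc_crc((unsigned char *)addr, ETH_ALEN) & 0x7f;

	return ((bit & 0x60) >> 5) * 32 + (bit & 0x1f);	/* 0..127 */
}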
9644 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9645 {
9646 /* accept or reject all multicast frames */
9647 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9648 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9649 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9650 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9651 }
9653 static void __tg3_set_rx_mode(struct net_device *dev)
9654 {
9655 struct tg3 *tp = netdev_priv(dev);
9656 u32 rx_mode;
9658 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9659 RX_MODE_KEEP_VLAN_TAG);
9661 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9662 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9663 * flag clear.
9664 */
9665 if (!tg3_flag(tp, ENABLE_ASF))
9666 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9667 #endif
9669 if (dev->flags & IFF_PROMISC) {
9670 /* Promiscuous mode. */
9671 rx_mode |= RX_MODE_PROMISC;
9672 } else if (dev->flags & IFF_ALLMULTI) {
9673 /* Accept all multicast. */
9674 tg3_set_multi(tp, 1);
9675 } else if (netdev_mc_empty(dev)) {
9676 /* Reject all multicast. */
9677 tg3_set_multi(tp, 0);
9678 } else {
9679 /* Accept one or more multicast(s). */
9680 struct netdev_hw_addr *ha;
9681 u32 mc_filter[4] = { 0, };
9682 u32 regidx;
9683 u32 bit;
9684 u32 crc;
9686 netdev_for_each_mc_addr(ha, dev) {
9687 crc = calc_crc(ha->addr, ETH_ALEN);
9688 bit = ~crc & 0x7f;
9689 regidx = (bit & 0x60) >> 5;
9690 bit &= 0x1f;
9691 mc_filter[regidx] |= (1 << bit);
9692 }
9694 tw32(MAC_HASH_REG_0, mc_filter[0]);
9695 tw32(MAC_HASH_REG_1, mc_filter[1]);
9696 tw32(MAC_HASH_REG_2, mc_filter[2]);
9697 tw32(MAC_HASH_REG_3, mc_filter[3]);
9698 }
9700 if (rx_mode != tp->rx_mode) {
9701 tp->rx_mode = rx_mode;
9702 tw32_f(MAC_RX_MODE, rx_mode);
9703 udelay(10);
9704 }
9705 }
9707 static void tg3_set_rx_mode(struct net_device *dev)
9708 {
9709 struct tg3 *tp = netdev_priv(dev);
9711 if (!netif_running(dev))
9712 return;
9714 tg3_full_lock(tp, 0);
9715 __tg3_set_rx_mode(dev);
9716 tg3_full_unlock(tp);
9717 }
9719 static int tg3_get_regs_len(struct net_device *dev)
9720 {
9721 return TG3_REG_BLK_SIZE;
9722 }
9724 static void tg3_get_regs(struct net_device *dev,
9725 struct ethtool_regs *regs, void *_p)
9726 {
9727 struct tg3 *tp = netdev_priv(dev);
9729 regs->version = 0;
9731 memset(_p, 0, TG3_REG_BLK_SIZE);
9733 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9734 return;
9736 tg3_full_lock(tp, 0);
9738 tg3_dump_legacy_regs(tp, (u32 *)_p);
9740 tg3_full_unlock(tp);
9741 }
9743 static int tg3_get_eeprom_len(struct net_device *dev)
9744 {
9745 struct tg3 *tp = netdev_priv(dev);
9747 return tp->nvram_size;
9748 }
9750 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9751 {
9752 struct tg3 *tp = netdev_priv(dev);
9753 int ret;
9754 u8 *pd;
9755 u32 i, offset, len, b_offset, b_count;
9756 __be32 val;
9758 if (tg3_flag(tp, NO_NVRAM))
9759 return -EINVAL;
9761 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9762 return -EAGAIN;
9764 offset = eeprom->offset;
9765 len = eeprom->len;
9766 eeprom->len = 0;
9768 eeprom->magic = TG3_EEPROM_MAGIC;
9770 if (offset & 3) {
9771 /* adjustments to start on required 4 byte boundary */
9772 b_offset = offset & 3;
9773 b_count = 4 - b_offset;
9774 if (b_count > len) {
9775 /* i.e. offset=1 len=2 */
9776 b_count = len;
9777 }
9778 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9779 if (ret)
9780 return ret;
9781 memcpy(data, ((char *)&val) + b_offset, b_count);
9782 len -= b_count;
9783 offset += b_count;
9784 eeprom->len += b_count;
9785 }
9787 /* read bytes up to the last 4 byte boundary */
9788 pd = &data[eeprom->len];
9789 for (i = 0; i < (len - (len & 3)); i += 4) {
9790 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9791 if (ret) {
9792 eeprom->len += i;
9793 return ret;
9794 }
9795 memcpy(pd + i, &val, 4);
9797 eeprom->len += i;
9799 if (len & 3) {
9800 /* read last bytes not ending on 4 byte boundary */
9801 pd = &data[eeprom->len];
9802 b_count = len & 3;
9803 b_offset = offset + len - b_count;
9804 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9805 if (ret)
9806 return ret;
9807 memcpy(pd, &val, b_count);
9808 eeprom->len += b_count;
9809 }
9810 return 0;
9811 }
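/* Worked example (illustrative, not part of the driver): for the
 * "offset=1 len=2" case noted above, b_offset = 1 and b_count =
 * min(4 - 1, 2) = 2, so one aligned word is read at NVRAM offset 0 and
 * bytes 1..2 of it are copied out; the aligned middle loop and the
 * trailing fixup then have nothing left to do (len - b_count == 0).
 */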
9813 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9815 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9816 {
9817 struct tg3 *tp = netdev_priv(dev);
9818 int ret;
9819 u32 offset, len, b_offset, odd_len;
9820 u8 *buf;
9821 __be32 start, end;
9823 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9824 return -EAGAIN;
9826 if (tg3_flag(tp, NO_NVRAM) ||
9827 eeprom->magic != TG3_EEPROM_MAGIC)
9828 return -EINVAL;
9830 offset = eeprom->offset;
9831 len = eeprom->len;
9833 if ((b_offset = (offset & 3))) {
9834 /* adjustments to start on required 4 byte boundary */
9835 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9836 if (ret)
9837 return ret;
9838 len += b_offset;
9839 offset &= ~3;
9840 if (len < 4)
9841 len = 4;
9842 }
9844 odd_len = 0;
9845 if (len & 3) {
9846 /* adjustments to end on required 4 byte boundary */
9847 odd_len = 1;
9848 len = (len + 3) & ~3;
9849 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9850 if (ret)
9851 return ret;
9852 }
9854 buf = data;
9855 if (b_offset || odd_len) {
9856 buf = kmalloc(len, GFP_KERNEL);
9857 if (!buf)
9858 return -ENOMEM;
9859 if (b_offset)
9860 memcpy(buf, &start, 4);
9861 if (odd_len)
9862 memcpy(buf+len-4, &end, 4);
9863 memcpy(buf + b_offset, data, eeprom->len);
9864 }
9866 ret = tg3_nvram_write_block(tp, offset, len, buf);
9868 if (buf != data)
9869 kfree(buf);
9871 return ret;
9872 }
9874 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9875 {
9876 struct tg3 *tp = netdev_priv(dev);
9878 if (tg3_flag(tp, USE_PHYLIB)) {
9879 struct phy_device *phydev;
9880 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9881 return -EAGAIN;
9882 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9883 return phy_ethtool_gset(phydev, cmd);
9884 }
9886 cmd->supported = (SUPPORTED_Autoneg);
9888 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9889 cmd->supported |= (SUPPORTED_1000baseT_Half |
9890 SUPPORTED_1000baseT_Full);
9892 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
9893 cmd->supported |= (SUPPORTED_100baseT_Half |
9894 SUPPORTED_100baseT_Full |
9895 SUPPORTED_10baseT_Half |
9896 SUPPORTED_10baseT_Full |
9897 SUPPORTED_TP);
9898 cmd->port = PORT_TP;
9899 } else {
9900 cmd->supported |= SUPPORTED_FIBRE;
9901 cmd->port = PORT_FIBRE;
9902 }
9904 cmd->advertising = tp->link_config.advertising;
9905 if (netif_running(dev)) {
9906 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
9907 cmd->duplex = tp->link_config.active_duplex;
9908 } else {
9909 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
9910 cmd->duplex = DUPLEX_INVALID;
9911 }
9912 cmd->phy_address = tp->phy_addr;
9913 cmd->transceiver = XCVR_INTERNAL;
9914 cmd->autoneg = tp->link_config.autoneg;
9915 cmd->maxtxpkt = 0;
9916 cmd->maxrxpkt = 0;
9917 return 0;
9918 }
9920 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9921 {
9922 struct tg3 *tp = netdev_priv(dev);
9923 u32 speed = ethtool_cmd_speed(cmd);
9925 if (tg3_flag(tp, USE_PHYLIB)) {
9926 struct phy_device *phydev;
9927 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9928 return -EAGAIN;
9929 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9930 return phy_ethtool_sset(phydev, cmd);
9931 }
9933 if (cmd->autoneg != AUTONEG_ENABLE &&
9934 cmd->autoneg != AUTONEG_DISABLE)
9935 return -EINVAL;
9937 if (cmd->autoneg == AUTONEG_DISABLE &&
9938 cmd->duplex != DUPLEX_FULL &&
9939 cmd->duplex != DUPLEX_HALF)
9940 return -EINVAL;
9942 if (cmd->autoneg == AUTONEG_ENABLE) {
9943 u32 mask = ADVERTISED_Autoneg |
9944 ADVERTISED_Pause |
9945 ADVERTISED_Asym_Pause;
9947 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9948 mask |= ADVERTISED_1000baseT_Half |
9949 ADVERTISED_1000baseT_Full;
9951 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
9952 mask |= ADVERTISED_100baseT_Half |
9953 ADVERTISED_100baseT_Full |
9954 ADVERTISED_10baseT_Half |
9955 ADVERTISED_10baseT_Full |
9956 ADVERTISED_TP;
9957 else
9958 mask |= ADVERTISED_FIBRE;
9960 if (cmd->advertising & ~mask)
9961 return -EINVAL;
9963 mask &= (ADVERTISED_1000baseT_Half |
9964 ADVERTISED_1000baseT_Full |
9965 ADVERTISED_100baseT_Half |
9966 ADVERTISED_100baseT_Full |
9967 ADVERTISED_10baseT_Half |
9968 ADVERTISED_10baseT_Full);
9970 cmd->advertising &= mask;
9971 } else {
9972 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
9973 if (speed != SPEED_1000)
9974 return -EINVAL;
9976 if (cmd->duplex != DUPLEX_FULL)
9977 return -EINVAL;
9978 } else {
9979 if (speed != SPEED_100 &&
9980 speed != SPEED_10)
9981 return -EINVAL;
9982 }
9983 }
9985 tg3_full_lock(tp, 0);
9987 tp->link_config.autoneg = cmd->autoneg;
9988 if (cmd->autoneg == AUTONEG_ENABLE) {
9989 tp->link_config.advertising = (cmd->advertising |
9990 ADVERTISED_Autoneg);
9991 tp->link_config.speed = SPEED_INVALID;
9992 tp->link_config.duplex = DUPLEX_INVALID;
9993 } else {
9994 tp->link_config.advertising = 0;
9995 tp->link_config.speed = speed;
9996 tp->link_config.duplex = cmd->duplex;
9997 }
9999 tp->link_config.orig_speed = tp->link_config.speed;
10000 tp->link_config.orig_duplex = tp->link_config.duplex;
10001 tp->link_config.orig_autoneg = tp->link_config.autoneg;
10003 if (netif_running(dev))
10004 tg3_setup_phy(tp, 1);
10006 tg3_full_unlock(tp);
10008 return 0;
10009 }
10011 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10012 {
10013 struct tg3 *tp = netdev_priv(dev);
10015 strcpy(info->driver, DRV_MODULE_NAME);
10016 strcpy(info->version, DRV_MODULE_VERSION);
10017 strcpy(info->fw_version, tp->fw_ver);
10018 strcpy(info->bus_info, pci_name(tp->pdev));
10019 }
10021 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10022 {
10023 struct tg3 *tp = netdev_priv(dev);
10025 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10026 wol->supported = WAKE_MAGIC;
10027 else
10028 wol->supported = 0;
10029 wol->wolopts = 0;
10030 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10031 wol->wolopts = WAKE_MAGIC;
10032 memset(&wol->sopass, 0, sizeof(wol->sopass));
10033 }
10035 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10036 {
10037 struct tg3 *tp = netdev_priv(dev);
10038 struct device *dp = &tp->pdev->dev;
10040 if (wol->wolopts & ~WAKE_MAGIC)
10041 return -EINVAL;
10042 if ((wol->wolopts & WAKE_MAGIC) &&
10043 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10044 return -EINVAL;
10046 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10048 spin_lock_bh(&tp->lock);
10049 if (device_may_wakeup(dp))
10050 tg3_flag_set(tp, WOL_ENABLE);
10051 else
10052 tg3_flag_clear(tp, WOL_ENABLE);
10053 spin_unlock_bh(&tp->lock);
10055 return 0;
10056 }
10058 static u32 tg3_get_msglevel(struct net_device *dev)
10059 {
10060 struct tg3 *tp = netdev_priv(dev);
10061 return tp->msg_enable;
10062 }
10064 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10065 {
10066 struct tg3 *tp = netdev_priv(dev);
10067 tp->msg_enable = value;
10068 }
10070 static int tg3_nway_reset(struct net_device *dev)
10071 {
10072 struct tg3 *tp = netdev_priv(dev);
10073 int r;
10075 if (!netif_running(dev))
10076 return -EAGAIN;
10078 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10079 return -EINVAL;
10081 if (tg3_flag(tp, USE_PHYLIB)) {
10082 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10083 return -EAGAIN;
10084 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10085 } else {
10086 u32 bmcr;
10088 spin_lock_bh(&tp->lock);
10089 r = -EINVAL;
10090 tg3_readphy(tp, MII_BMCR, &bmcr);
10091 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10092 ((bmcr & BMCR_ANENABLE) ||
10093 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10094 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10095 BMCR_ANENABLE);
10096 r = 0;
10097 }
10098 spin_unlock_bh(&tp->lock);
10099 }
10101 return r;
10102 }
10104 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10105 {
10106 struct tg3 *tp = netdev_priv(dev);
10108 ering->rx_max_pending = tp->rx_std_ring_mask;
10109 ering->rx_mini_max_pending = 0;
10110 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10111 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10112 else
10113 ering->rx_jumbo_max_pending = 0;
10115 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10117 ering->rx_pending = tp->rx_pending;
10118 ering->rx_mini_pending = 0;
10119 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10120 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10121 else
10122 ering->rx_jumbo_pending = 0;
10124 ering->tx_pending = tp->napi[0].tx_pending;
10125 }
10127 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10128 {
10129 struct tg3 *tp = netdev_priv(dev);
10130 int i, irq_sync = 0, err = 0;
10132 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10133 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10134 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10135 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10136 (tg3_flag(tp, TSO_BUG) &&
10137 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10138 return -EINVAL;
10140 if (netif_running(dev)) {
10141 tg3_phy_stop(tp);
10142 tg3_netif_stop(tp);
10143 irq_sync = 1;
10144 }
10146 tg3_full_lock(tp, irq_sync);
10148 tp->rx_pending = ering->rx_pending;
10150 if (tg3_flag(tp, MAX_RXPEND_64) &&
10151 tp->rx_pending > 63)
10152 tp->rx_pending = 63;
10153 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10155 for (i = 0; i < tp->irq_max; i++)
10156 tp->napi[i].tx_pending = ering->tx_pending;
10158 if (netif_running(dev)) {
10159 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10160 err = tg3_restart_hw(tp, 1);
10161 if (!err)
10162 tg3_netif_start(tp);
10163 }
10165 tg3_full_unlock(tp);
10167 if (irq_sync && !err)
10168 tg3_phy_start(tp);
10170 return err;
10171 }
10173 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10174 {
10175 struct tg3 *tp = netdev_priv(dev);
10177 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10179 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10180 epause->rx_pause = 1;
10181 else
10182 epause->rx_pause = 0;
10184 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10185 epause->tx_pause = 1;
10186 else
10187 epause->tx_pause = 0;
10188 }
10190 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10191 {
10192 struct tg3 *tp = netdev_priv(dev);
10193 int err = 0;
10195 if (tg3_flag(tp, USE_PHYLIB)) {
10196 u32 newadv;
10197 struct phy_device *phydev;
10199 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10201 if (!(phydev->supported & SUPPORTED_Pause) ||
10202 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10203 (epause->rx_pause != epause->tx_pause)))
10204 return -EINVAL;
10206 tp->link_config.flowctrl = 0;
10207 if (epause->rx_pause) {
10208 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10210 if (epause->tx_pause) {
10211 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10212 newadv = ADVERTISED_Pause;
10213 } else
10214 newadv = ADVERTISED_Pause |
10215 ADVERTISED_Asym_Pause;
10216 } else if (epause->tx_pause) {
10217 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10218 newadv = ADVERTISED_Asym_Pause;
10219 } else
10220 newadv = 0;
10222 if (epause->autoneg)
10223 tg3_flag_set(tp, PAUSE_AUTONEG);
10224 else
10225 tg3_flag_clear(tp, PAUSE_AUTONEG);
10227 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10228 u32 oldadv = phydev->advertising &
10229 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10230 if (oldadv != newadv) {
10231 phydev->advertising &=
10232 ~(ADVERTISED_Pause |
10233 ADVERTISED_Asym_Pause);
10234 phydev->advertising |= newadv;
10235 if (phydev->autoneg) {
10236 /*
10237 * Always renegotiate the link to
10238 * inform our link partner of our
10239 * flow control settings, even if the
10240 * flow control is forced. Let
10241 * tg3_adjust_link() do the final
10242 * flow control setup.
10243 */
10244 return phy_start_aneg(phydev);
10245 }
10246 }
10248 if (!epause->autoneg)
10249 tg3_setup_flow_control(tp, 0, 0);
10250 } else {
10251 tp->link_config.orig_advertising &=
10252 ~(ADVERTISED_Pause |
10253 ADVERTISED_Asym_Pause);
10254 tp->link_config.orig_advertising |= newadv;
10255 }
10256 } else {
10257 int irq_sync = 0;
10259 if (netif_running(dev)) {
10260 tg3_netif_stop(tp);
10261 irq_sync = 1;
10262 }
10264 tg3_full_lock(tp, irq_sync);
10266 if (epause->autoneg)
10267 tg3_flag_set(tp, PAUSE_AUTONEG);
10268 else
10269 tg3_flag_clear(tp, PAUSE_AUTONEG);
10270 if (epause->rx_pause)
10271 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10272 else
10273 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10274 if (epause->tx_pause)
10275 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10276 else
10277 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10279 if (netif_running(dev)) {
10280 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10281 err = tg3_restart_hw(tp, 1);
10282 if (!err)
10283 tg3_netif_start(tp);
10284 }
10286 tg3_full_unlock(tp);
10287 }
10289 return err;
10290 }
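/*
 * Editor's note: the advertisement bits programmed above follow the
 * standard 802.3 pause resolution mapping.  A hypothetical helper
 * (not in the driver) that captures the same truth table:
 *
 *   rx && tx -> Pause
 *   rx only  -> Pause | Asym_Pause
 *   tx only  -> Asym_Pause
 *   neither  -> 0
 */
static inline u32 tg3_flowctrl_to_adv(bool rx, bool tx)
{
	if (rx)
		return tx ? ADVERTISED_Pause
			  : ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	return tx ? ADVERTISED_Asym_Pause : 0;
}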
10292 static int tg3_get_sset_count(struct net_device *dev, int sset)
10293 {
10294 switch (sset) {
10295 case ETH_SS_TEST:
10296 return TG3_NUM_TEST;
10297 case ETH_SS_STATS:
10298 return TG3_NUM_STATS;
10299 default:
10300 return -EOPNOTSUPP;
10301 }
10302 }
10304 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10305 {
10306 switch (stringset) {
10307 case ETH_SS_STATS:
10308 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10309 break;
10310 case ETH_SS_TEST:
10311 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10312 break;
10313 default:
10314 WARN_ON(1); /* we need a WARN() */
10315 break;
10316 }
10317 }
10319 static int tg3_set_phys_id(struct net_device *dev,
10320 enum ethtool_phys_id_state state)
10321 {
10322 struct tg3 *tp = netdev_priv(dev);
10324 if (!netif_running(tp->dev))
10325 return -EAGAIN;
10327 switch (state) {
10328 case ETHTOOL_ID_ACTIVE:
10329 return 1; /* cycle on/off once per second */
10331 case ETHTOOL_ID_ON:
10332 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10333 LED_CTRL_1000MBPS_ON |
10334 LED_CTRL_100MBPS_ON |
10335 LED_CTRL_10MBPS_ON |
10336 LED_CTRL_TRAFFIC_OVERRIDE |
10337 LED_CTRL_TRAFFIC_BLINK |
10338 LED_CTRL_TRAFFIC_LED);
10339 break;
10341 case ETHTOOL_ID_OFF:
10342 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10343 LED_CTRL_TRAFFIC_OVERRIDE);
10344 break;
10346 case ETHTOOL_ID_INACTIVE:
10347 tw32(MAC_LED_CTRL, tp->led_ctrl);
10348 break;
10349 }
10351 return 0;
10352 }
10354 static void tg3_get_ethtool_stats(struct net_device *dev,
10355 struct ethtool_stats *estats, u64 *tmp_stats)
10356 {
10357 struct tg3 *tp = netdev_priv(dev);
10358 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10359 }
10361 static __be32 * tg3_vpd_readblock(struct tg3 *tp)
10362 {
10363 int i;
10364 __be32 *buf;
10365 u32 offset = 0, len = 0;
10366 u32 magic, val;
10368 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10369 return NULL;
10371 if (magic == TG3_EEPROM_MAGIC) {
10372 for (offset = TG3_NVM_DIR_START;
10373 offset < TG3_NVM_DIR_END;
10374 offset += TG3_NVM_DIRENT_SIZE) {
10375 if (tg3_nvram_read(tp, offset, &val))
10376 return NULL;
10378 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10379 TG3_NVM_DIRTYPE_EXTVPD)
10380 break;
10381 }
10383 if (offset != TG3_NVM_DIR_END) {
10384 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10385 if (tg3_nvram_read(tp, offset + 4, &offset))
10386 return NULL;
10388 offset = tg3_nvram_logical_addr(tp, offset);
10389 }
10390 }
10392 if (!offset || !len) {
10393 offset = TG3_NVM_VPD_OFF;
10394 len = TG3_NVM_VPD_LEN;
10395 }
10397 buf = kmalloc(len, GFP_KERNEL);
10398 if (buf == NULL)
10399 return NULL;
10401 if (magic == TG3_EEPROM_MAGIC) {
10402 for (i = 0; i < len; i += 4) {
10403 /* The data is in little-endian format in NVRAM.
10404 * Use the big-endian read routines to preserve
10405 * the byte order as it exists in NVRAM.
10406 */
10407 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10408 goto error;
10409 }
10410 } else {
10411 u8 *ptr;
10412 ssize_t cnt;
10413 unsigned int pos = 0;
10415 ptr = (u8 *)&buf[0];
10416 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10417 cnt = pci_read_vpd(tp->pdev, pos,
10418 len - pos, ptr);
10419 if (cnt == -ETIMEDOUT || cnt == -EINTR)
10420 cnt = 0;
10421 else if (cnt < 0)
10422 goto error;
10423 }
10424 if (pos != len)
10425 goto error;
10426 }
10428 return buf;
10430 error:
10431 kfree(buf);
10432 return NULL;
10433 }
10435 #define NVRAM_TEST_SIZE 0x100
10436 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10437 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10438 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10439 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10440 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10442 static int tg3_test_nvram(struct tg3 *tp)
10444 u32 csum, magic;
10445 __be32 *buf;
10446 int i, j, k, err = 0, size;
10448 if (tg3_flag(tp, NO_NVRAM))
10449 return 0;
10451 if (tg3_nvram_read(tp, 0, &magic) != 0)
10452 return -EIO;
10454 if (magic == TG3_EEPROM_MAGIC)
10455 size = NVRAM_TEST_SIZE;
10456 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10457 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10458 TG3_EEPROM_SB_FORMAT_1) {
10459 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10460 case TG3_EEPROM_SB_REVISION_0:
10461 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10462 break;
10463 case TG3_EEPROM_SB_REVISION_2:
10464 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10465 break;
10466 case TG3_EEPROM_SB_REVISION_3:
10467 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10468 break;
10469 default:
10470 return 0;
10471 }
10472 } else
10473 return 0;
10474 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10475 size = NVRAM_SELFBOOT_HW_SIZE;
10476 else
10477 return -EIO;
10479 buf = kmalloc(size, GFP_KERNEL);
10480 if (buf == NULL)
10481 return -ENOMEM;
10483 err = -EIO;
10484 for (i = 0, j = 0; i < size; i += 4, j++) {
10485 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10486 if (err)
10487 break;
10488 }
10489 if (i < size)
10490 goto out;
10492 /* Selfboot format */
10493 magic = be32_to_cpu(buf[0]);
10494 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10495 TG3_EEPROM_MAGIC_FW) {
10496 u8 *buf8 = (u8 *) buf, csum8 = 0;
10498 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10499 TG3_EEPROM_SB_REVISION_2) {
10500 /* For rev 2, the csum doesn't include the MBA. */
10501 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10502 csum8 += buf8[i];
10503 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10504 csum8 += buf8[i];
10505 } else {
10506 for (i = 0; i < size; i++)
10507 csum8 += buf8[i];
10508 }
10510 if (csum8 == 0) {
10511 err = 0;
10512 goto out;
10513 }
10515 err = -EIO;
10516 goto out;
10517 }
10519 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10520 TG3_EEPROM_MAGIC_HW) {
10521 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10522 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10523 u8 *buf8 = (u8 *) buf;
10525 /* Separate the parity bits and the data bytes. */
10526 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10527 if ((i == 0) || (i == 8)) {
10528 int l;
10529 u8 msk;
10531 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10532 parity[k++] = buf8[i] & msk;
10533 i++;
10534 } else if (i == 16) {
10535 int l;
10536 u8 msk;
10538 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10539 parity[k++] = buf8[i] & msk;
10540 i++;
10542 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10543 parity[k++] = buf8[i] & msk;
10544 i++;
10545 }
10546 data[j++] = buf8[i];
10547 }
10549 err = -EIO;
10550 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10551 u8 hw8 = hweight8(data[i]);
10553 if ((hw8 & 0x1) && parity[i])
10554 goto out;
10555 else if (!(hw8 & 0x1) && !parity[i])
10556 goto out;
10557 }
10558 err = 0;
10559 goto out;
10562 err = -EIO;
10564 /* Bootstrap checksum at offset 0x10 */
10565 csum = calc_crc((unsigned char *) buf, 0x10);
10566 if (csum != le32_to_cpu(buf[0x10/4]))
10567 goto out;
10569 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10570 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10571 if (csum != le32_to_cpu(buf[0xfc/4]))
10572 goto out;
10574 kfree(buf);
10576 buf = tg3_vpd_readblock(tp);
10577 if (!buf)
10578 return -ENOMEM;
10580 i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
10581 PCI_VPD_LRDT_RO_DATA);
10582 if (i > 0) {
10583 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10584 if (j < 0)
10585 goto out;
10587 if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
10588 goto out;
10590 i += PCI_VPD_LRDT_TAG_SIZE;
10591 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10592 PCI_VPD_RO_KEYWORD_CHKSUM);
10593 if (j > 0) {
10594 u8 csum8 = 0;
10596 j += PCI_VPD_INFO_FLD_HDR_SIZE;
10598 for (i = 0; i <= j; i++)
10599 csum8 += ((u8 *)buf)[i];
10601 if (csum8)
10602 goto out;
10606 err = 0;
10608 out:
10609 kfree(buf);
10610 return err;
10611 }
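/*
 * Editor's note: the hardware selfboot test above enforces odd parity
 * over each data byte plus its stored parity bit -- a byte passes only
 * when the total number of ones across the pair is odd.  Minimal
 * sketch of the same predicate (hypothetical, not driver code):
 */
static inline bool tg3_selfboot_parity_ok(u8 data, bool parity_bit)
{
	/* hweight8() counts the one bits in the data byte. */
	return (hweight8(data) & 1) != parity_bit;
}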
10613 #define TG3_SERDES_TIMEOUT_SEC 2
10614 #define TG3_COPPER_TIMEOUT_SEC 6
10616 static int tg3_test_link(struct tg3 *tp)
10618 int i, max;
10620 if (!netif_running(tp->dev))
10621 return -ENODEV;
10623 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10624 max = TG3_SERDES_TIMEOUT_SEC;
10625 else
10626 max = TG3_COPPER_TIMEOUT_SEC;
10628 for (i = 0; i < max; i++) {
10629 if (netif_carrier_ok(tp->dev))
10630 return 0;
10632 if (msleep_interruptible(1000))
10633 break;
10636 return -EIO;
10639 /* Only test the commonly used registers */
10640 static int tg3_test_registers(struct tg3 *tp)
10642 int i, is_5705, is_5750;
10643 u32 offset, read_mask, write_mask, val, save_val, read_val;
10644 static struct {
10645 u16 offset;
10646 u16 flags;
10647 #define TG3_FL_5705 0x1
10648 #define TG3_FL_NOT_5705 0x2
10649 #define TG3_FL_NOT_5788 0x4
10650 #define TG3_FL_NOT_5750 0x8
10651 u32 read_mask;
10652 u32 write_mask;
10653 } reg_tbl[] = {
10654 /* MAC Control Registers */
10655 { MAC_MODE, TG3_FL_NOT_5705,
10656 0x00000000, 0x00ef6f8c },
10657 { MAC_MODE, TG3_FL_5705,
10658 0x00000000, 0x01ef6b8c },
10659 { MAC_STATUS, TG3_FL_NOT_5705,
10660 0x03800107, 0x00000000 },
10661 { MAC_STATUS, TG3_FL_5705,
10662 0x03800100, 0x00000000 },
10663 { MAC_ADDR_0_HIGH, 0x0000,
10664 0x00000000, 0x0000ffff },
10665 { MAC_ADDR_0_LOW, 0x0000,
10666 0x00000000, 0xffffffff },
10667 { MAC_RX_MTU_SIZE, 0x0000,
10668 0x00000000, 0x0000ffff },
10669 { MAC_TX_MODE, 0x0000,
10670 0x00000000, 0x00000070 },
10671 { MAC_TX_LENGTHS, 0x0000,
10672 0x00000000, 0x00003fff },
10673 { MAC_RX_MODE, TG3_FL_NOT_5705,
10674 0x00000000, 0x000007fc },
10675 { MAC_RX_MODE, TG3_FL_5705,
10676 0x00000000, 0x000007dc },
10677 { MAC_HASH_REG_0, 0x0000,
10678 0x00000000, 0xffffffff },
10679 { MAC_HASH_REG_1, 0x0000,
10680 0x00000000, 0xffffffff },
10681 { MAC_HASH_REG_2, 0x0000,
10682 0x00000000, 0xffffffff },
10683 { MAC_HASH_REG_3, 0x0000,
10684 0x00000000, 0xffffffff },
10686 /* Receive Data and Receive BD Initiator Control Registers. */
10687 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10688 0x00000000, 0xffffffff },
10689 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10690 0x00000000, 0xffffffff },
10691 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10692 0x00000000, 0x00000003 },
10693 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10694 0x00000000, 0xffffffff },
10695 { RCVDBDI_STD_BD+0, 0x0000,
10696 0x00000000, 0xffffffff },
10697 { RCVDBDI_STD_BD+4, 0x0000,
10698 0x00000000, 0xffffffff },
10699 { RCVDBDI_STD_BD+8, 0x0000,
10700 0x00000000, 0xffff0002 },
10701 { RCVDBDI_STD_BD+0xc, 0x0000,
10702 0x00000000, 0xffffffff },
10704 /* Receive BD Initiator Control Registers. */
10705 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10706 0x00000000, 0xffffffff },
10707 { RCVBDI_STD_THRESH, TG3_FL_5705,
10708 0x00000000, 0x000003ff },
10709 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10710 0x00000000, 0xffffffff },
10712 /* Host Coalescing Control Registers. */
10713 { HOSTCC_MODE, TG3_FL_NOT_5705,
10714 0x00000000, 0x00000004 },
10715 { HOSTCC_MODE, TG3_FL_5705,
10716 0x00000000, 0x000000f6 },
10717 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10718 0x00000000, 0xffffffff },
10719 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10720 0x00000000, 0x000003ff },
10721 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10722 0x00000000, 0xffffffff },
10723 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10724 0x00000000, 0x000003ff },
10725 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10726 0x00000000, 0xffffffff },
10727 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10728 0x00000000, 0x000000ff },
10729 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10730 0x00000000, 0xffffffff },
10731 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10732 0x00000000, 0x000000ff },
10733 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10734 0x00000000, 0xffffffff },
10735 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10736 0x00000000, 0xffffffff },
10737 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10738 0x00000000, 0xffffffff },
10739 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10740 0x00000000, 0x000000ff },
10741 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10742 0x00000000, 0xffffffff },
10743 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10744 0x00000000, 0x000000ff },
10745 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10746 0x00000000, 0xffffffff },
10747 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10748 0x00000000, 0xffffffff },
10749 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10750 0x00000000, 0xffffffff },
10751 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10752 0x00000000, 0xffffffff },
10753 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10754 0x00000000, 0xffffffff },
10755 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10756 0xffffffff, 0x00000000 },
10757 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10758 0xffffffff, 0x00000000 },
10760 /* Buffer Manager Control Registers. */
10761 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10762 0x00000000, 0x007fff80 },
10763 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10764 0x00000000, 0x007fffff },
10765 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10766 0x00000000, 0x0000003f },
10767 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10768 0x00000000, 0x000001ff },
10769 { BUFMGR_MB_HIGH_WATER, 0x0000,
10770 0x00000000, 0x000001ff },
10771 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10772 0xffffffff, 0x00000000 },
10773 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10774 0xffffffff, 0x00000000 },
10776 /* Mailbox Registers */
10777 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10778 0x00000000, 0x000001ff },
10779 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10780 0x00000000, 0x000001ff },
10781 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10782 0x00000000, 0x000007ff },
10783 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10784 0x00000000, 0x000001ff },
10786 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10789 is_5705 = is_5750 = 0;
10790 if (tg3_flag(tp, 5705_PLUS)) {
10791 is_5705 = 1;
10792 if (tg3_flag(tp, 5750_PLUS))
10793 is_5750 = 1;
10796 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10797 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10798 continue;
10800 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10801 continue;
10803 if (tg3_flag(tp, IS_5788) &&
10804 (reg_tbl[i].flags & TG3_FL_NOT_5788))
10805 continue;
10807 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10808 continue;
10810 offset = (u32) reg_tbl[i].offset;
10811 read_mask = reg_tbl[i].read_mask;
10812 write_mask = reg_tbl[i].write_mask;
10814 /* Save the original register content */
10815 save_val = tr32(offset);
10817 /* Determine the read-only value. */
10818 read_val = save_val & read_mask;
10820 /* Write zero to the register, then make sure the read-only bits
10821 * are not changed and the read/write bits are all zeros.
10822 */
10823 tw32(offset, 0);
10825 val = tr32(offset);
10827 /* Test the read-only and read/write bits. */
10828 if (((val & read_mask) != read_val) || (val & write_mask))
10829 goto out;
10831 /* Write ones to all the bits defined by RdMask and WrMask, then
10832 * make sure the read-only bits are not changed and the
10833 * read/write bits are all ones.
10834 */
10835 tw32(offset, read_mask | write_mask);
10837 val = tr32(offset);
10839 /* Test the read-only bits. */
10840 if ((val & read_mask) != read_val)
10841 goto out;
10843 /* Test the read/write bits. */
10844 if ((val & write_mask) != write_mask)
10845 goto out;
10847 tw32(offset, save_val);
10848 }
10850 return 0;
10852 out:
10853 if (netif_msg_hw(tp))
10854 netdev_err(tp->dev,
10855 "Register test failed at offset %x\n", offset);
10856 tw32(offset, save_val);
10857 return -EIO;
10858 }
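/*
 * Editor's sketch: the probe above for a single register, in
 * isolation.  Read-only bits must survive both an all-zeros and an
 * all-ones write, and read/write bits must follow the written value.
 * This helper is hypothetical; the driver inlines the same steps.
 */
static int tg3_probe_one_reg(struct tg3 *tp, u32 off, u32 ro_mask, u32 rw_mask)
{
	u32 save = tr32(off);
	u32 ro_val = save & ro_mask;
	u32 val;
	int bad;

	tw32(off, 0);
	val = tr32(off);
	bad = ((val & ro_mask) != ro_val) || (val & rw_mask);

	tw32(off, ro_mask | rw_mask);
	val = tr32(off);
	bad |= ((val & ro_mask) != ro_val) ||
	       ((val & rw_mask) != rw_mask);

	tw32(off, save); /* always restore the original contents */
	return bad ? -EIO : 0;
}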
10860 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10862 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10863 int i;
10864 u32 j;
10866 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10867 for (j = 0; j < len; j += 4) {
10868 u32 val;
10870 tg3_write_mem(tp, offset + j, test_pattern[i]);
10871 tg3_read_mem(tp, offset + j, &val);
10872 if (val != test_pattern[i])
10873 return -EIO;
10876 return 0;
10879 static int tg3_test_memory(struct tg3 *tp)
10881 static struct mem_entry {
10882 u32 offset;
10883 u32 len;
10884 } mem_tbl_570x[] = {
10885 { 0x00000000, 0x00b50},
10886 { 0x00002000, 0x1c000},
10887 { 0xffffffff, 0x00000}
10888 }, mem_tbl_5705[] = {
10889 { 0x00000100, 0x0000c},
10890 { 0x00000200, 0x00008},
10891 { 0x00004000, 0x00800},
10892 { 0x00006000, 0x01000},
10893 { 0x00008000, 0x02000},
10894 { 0x00010000, 0x0e000},
10895 { 0xffffffff, 0x00000}
10896 }, mem_tbl_5755[] = {
10897 { 0x00000200, 0x00008},
10898 { 0x00004000, 0x00800},
10899 { 0x00006000, 0x00800},
10900 { 0x00008000, 0x02000},
10901 { 0x00010000, 0x0c000},
10902 { 0xffffffff, 0x00000}
10903 }, mem_tbl_5906[] = {
10904 { 0x00000200, 0x00008},
10905 { 0x00004000, 0x00400},
10906 { 0x00006000, 0x00400},
10907 { 0x00008000, 0x01000},
10908 { 0x00010000, 0x01000},
10909 { 0xffffffff, 0x00000}
10910 }, mem_tbl_5717[] = {
10911 { 0x00000200, 0x00008},
10912 { 0x00010000, 0x0a000},
10913 { 0x00020000, 0x13c00},
10914 { 0xffffffff, 0x00000}
10915 }, mem_tbl_57765[] = {
10916 { 0x00000200, 0x00008},
10917 { 0x00004000, 0x00800},
10918 { 0x00006000, 0x09800},
10919 { 0x00010000, 0x0a000},
10920 { 0xffffffff, 0x00000}
10922 struct mem_entry *mem_tbl;
10923 int err = 0;
10924 int i;
10926 if (tg3_flag(tp, 5717_PLUS))
10927 mem_tbl = mem_tbl_5717;
10928 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10929 mem_tbl = mem_tbl_57765;
10930 else if (tg3_flag(tp, 5755_PLUS))
10931 mem_tbl = mem_tbl_5755;
10932 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10933 mem_tbl = mem_tbl_5906;
10934 else if (tg3_flag(tp, 5705_PLUS))
10935 mem_tbl = mem_tbl_5705;
10936 else
10937 mem_tbl = mem_tbl_570x;
10939 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10940 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
10941 if (err)
10942 break;
10945 return err;
10948 #define TG3_MAC_LOOPBACK 0
10949 #define TG3_PHY_LOOPBACK 1
10950 #define TG3_TSO_LOOPBACK 2
10952 #define TG3_TSO_MSS 500
10954 #define TG3_TSO_IP_HDR_LEN 20
10955 #define TG3_TSO_TCP_HDR_LEN 20
10956 #define TG3_TSO_TCP_OPT_LEN 12
10958 static const u8 tg3_tso_header[] = {
10959 0x08, 0x00, /* Ethertype: IPv4 (header is copied in right after the two MAC addresses) */
10960 0x45, 0x00, 0x00, 0x00, /* IP: version/IHL, TOS, total length (patched at runtime) */
10961 0x00, 0x00, 0x40, 0x00, /* IP: id, fragment offset (DF set) */
10962 0x40, 0x06, 0x00, 0x00, /* IP: TTL 64, protocol TCP, header checksum */
10963 0x0a, 0x00, 0x00, 0x01, /* IP: source 10.0.0.1 */
10964 0x0a, 0x00, 0x00, 0x02, /* IP: destination 10.0.0.2 */
10965 0x0d, 0x00, 0xe0, 0x00, /* TCP: source port, destination port */
10966 0x00, 0x00, 0x01, 0x00, /* TCP: sequence number */
10967 0x00, 0x00, 0x02, 0x00, /* TCP: ack number */
10968 0x80, 0x10, 0x10, 0x00, /* TCP: data offset 8 words (12 option bytes), ACK, window */
10969 0x14, 0x09, 0x00, 0x00, /* TCP: checksum, urgent pointer */
10970 0x01, 0x01, 0x08, 0x0a, /* TCP options: NOP, NOP, timestamp (kind 8, len 10) */
10971 0x11, 0x11, 0x11, 0x11, /* TCP options: timestamp values (filler) */
10972 0x11, 0x11, 0x11, 0x11,
10973 };
10975 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
10977 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10978 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
10979 struct sk_buff *skb, *rx_skb;
10980 u8 *tx_data;
10981 dma_addr_t map;
10982 int num_pkts, tx_len, rx_len, i, err;
10983 struct tg3_rx_buffer_desc *desc;
10984 struct tg3_napi *tnapi, *rnapi;
10985 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
10987 tnapi = &tp->napi[0];
10988 rnapi = &tp->napi[0];
10989 if (tp->irq_cnt > 1) {
10990 if (tg3_flag(tp, ENABLE_RSS))
10991 rnapi = &tp->napi[1];
10992 if (tg3_flag(tp, ENABLE_TSS))
10993 tnapi = &tp->napi[1];
10995 coal_now = tnapi->coal_now | rnapi->coal_now;
10997 if (loopback_mode == TG3_MAC_LOOPBACK) {
10998 /* HW errata - mac loopback fails in some cases on 5780.
10999 * Normal traffic and PHY loopback are not affected by
11000 * errata. Also, the MAC loopback test is deprecated for
11001 * all newer ASIC revisions.
11002 */
11003 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11004 tg3_flag(tp, CPMU_PRESENT))
11005 return 0;
11007 mac_mode = tp->mac_mode &
11008 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11009 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11010 if (!tg3_flag(tp, 5705_PLUS))
11011 mac_mode |= MAC_MODE_LINK_POLARITY;
11012 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11013 mac_mode |= MAC_MODE_PORT_MODE_MII;
11014 else
11015 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11016 tw32(MAC_MODE, mac_mode);
11017 } else {
11018 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11019 tg3_phy_fet_toggle_apd(tp, false);
11020 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11021 } else
11022 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
11024 tg3_phy_toggle_automdix(tp, 0);
11026 tg3_writephy(tp, MII_BMCR, val);
11027 udelay(40);
11029 mac_mode = tp->mac_mode &
11030 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11031 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11032 tg3_writephy(tp, MII_TG3_FET_PTEST,
11033 MII_TG3_FET_PTEST_FRC_TX_LINK |
11034 MII_TG3_FET_PTEST_FRC_TX_LOCK);
11035 /* The write needs to be flushed for the AC131 */
11036 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11037 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11038 mac_mode |= MAC_MODE_PORT_MODE_MII;
11039 } else
11040 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11042 /* reset to prevent losing 1st rx packet intermittently */
11043 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11044 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11045 udelay(10);
11046 tw32_f(MAC_RX_MODE, tp->rx_mode);
11048 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11049 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11050 if (masked_phy_id == TG3_PHY_ID_BCM5401)
11051 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11052 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11053 mac_mode |= MAC_MODE_LINK_POLARITY;
11054 tg3_writephy(tp, MII_TG3_EXT_CTRL,
11055 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11057 tw32(MAC_MODE, mac_mode);
11059 /* Wait for link */
11060 for (i = 0; i < 100; i++) {
11061 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11062 break;
11063 mdelay(1);
11067 err = -EIO;
11069 tx_len = pktsz;
11070 skb = netdev_alloc_skb(tp->dev, tx_len);
11071 if (!skb)
11072 return -ENOMEM;
11074 tx_data = skb_put(skb, tx_len);
11075 memcpy(tx_data, tp->dev->dev_addr, 6);
11076 memset(tx_data + 6, 0x0, 8);
11078 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11080 if (loopback_mode == TG3_TSO_LOOPBACK) {
11081 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11083 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11084 TG3_TSO_TCP_OPT_LEN;
11086 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11087 sizeof(tg3_tso_header));
11088 mss = TG3_TSO_MSS;
11090 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11091 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11093 /* Set the total length field in the IP header */
11094 iph->tot_len = htons((u16)(mss + hdr_len));
11096 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11097 TXD_FLAG_CPU_POST_DMA);
11099 if (tg3_flag(tp, HW_TSO_1) ||
11100 tg3_flag(tp, HW_TSO_2) ||
11101 tg3_flag(tp, HW_TSO_3)) {
11102 struct tcphdr *th;
11103 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11104 th = (struct tcphdr *)&tx_data[val];
11105 th->check = 0;
11106 } else
11107 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11109 if (tg3_flag(tp, HW_TSO_3)) {
11110 mss |= (hdr_len & 0xc) << 12;
11111 if (hdr_len & 0x10)
11112 base_flags |= 0x00000010;
11113 base_flags |= (hdr_len & 0x3e0) << 5;
11114 } else if (tg3_flag(tp, HW_TSO_2))
11115 mss |= hdr_len << 9;
11116 else if (tg3_flag(tp, HW_TSO_1) ||
11117 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11118 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11119 } else {
11120 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11123 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11124 } else {
11125 num_pkts = 1;
11126 data_off = ETH_HLEN;
11129 for (i = data_off; i < tx_len; i++)
11130 tx_data[i] = (u8) (i & 0xff);
11132 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11133 if (pci_dma_mapping_error(tp->pdev, map)) {
11134 dev_kfree_skb(skb);
11135 return -EIO;
11138 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11139 rnapi->coal_now);
11141 udelay(10);
11143 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11145 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
11146 base_flags, (mss << 1) | 1);
11148 tnapi->tx_prod++;
11150 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11151 tr32_mailbox(tnapi->prodmbox);
11153 udelay(10);
11155 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11156 for (i = 0; i < 35; i++) {
11157 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11158 coal_now);
11160 udelay(10);
11162 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11163 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11164 if ((tx_idx == tnapi->tx_prod) &&
11165 (rx_idx == (rx_start_idx + num_pkts)))
11166 break;
11169 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
11170 dev_kfree_skb(skb);
11172 if (tx_idx != tnapi->tx_prod)
11173 goto out;
11175 if (rx_idx != rx_start_idx + num_pkts)
11176 goto out;
11178 val = data_off;
11179 while (rx_idx != rx_start_idx) {
11180 desc = &rnapi->rx_rcb[rx_start_idx++];
11181 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11182 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11184 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11185 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11186 goto out;
11188 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11189 - ETH_FCS_LEN;
11191 if (loopback_mode != TG3_TSO_LOOPBACK) {
11192 if (rx_len != tx_len)
11193 goto out;
11195 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11196 if (opaque_key != RXD_OPAQUE_RING_STD)
11197 goto out;
11198 } else {
11199 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11200 goto out;
11202 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11203 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11204 >> RXD_TCPCSUM_SHIFT != 0xffff) {
11205 goto out;
11208 if (opaque_key == RXD_OPAQUE_RING_STD) {
11209 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11210 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11211 mapping);
11212 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11213 rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11214 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11215 mapping);
11216 } else
11217 goto out;
11219 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11220 PCI_DMA_FROMDEVICE);
11222 for (i = data_off; i < rx_len; i++, val++) {
11223 if (*(rx_skb->data + i) != (u8) (val & 0xff))
11224 goto out;
11228 err = 0;
11230 /* tg3_free_rings will unmap and free the rx_skb */
11231 out:
11232 return err;
11233 }
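/*
 * Editor's note: the loopback payload is a rolling byte counter --
 * written as tx_data[i] = i & 0xff and re-derived on receive from a
 * running 'val' that carries across packets in the TSO case.  The
 * check, isolated into a hypothetical helper:
 */
static inline bool tg3_lpbk_pattern_ok(const u8 *data, int from, int to,
				       u32 *val)
{
	int i;

	for (i = from; i < to; i++, (*val)++)
		if (data[i] != (u8)(*val & 0xff))
			return false;
	return true;
}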
11235 #define TG3_STD_LOOPBACK_FAILED 1
11236 #define TG3_JMB_LOOPBACK_FAILED 2
11237 #define TG3_TSO_LOOPBACK_FAILED 4
11239 #define TG3_MAC_LOOPBACK_SHIFT 0
11240 #define TG3_PHY_LOOPBACK_SHIFT 4
11241 #define TG3_LOOPBACK_FAILED 0x00000077
11243 static int tg3_test_loopback(struct tg3 *tp)
11245 int err = 0;
11246 u32 eee_cap, cpmuctrl = 0;
11248 if (!netif_running(tp->dev))
11249 return TG3_LOOPBACK_FAILED;
11251 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11252 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11254 err = tg3_reset_hw(tp, 1);
11255 if (err) {
11256 err = TG3_LOOPBACK_FAILED;
11257 goto done;
11260 if (tg3_flag(tp, ENABLE_RSS)) {
11261 int i;
11263 /* Reroute all rx packets to the 1st queue */
11264 for (i = MAC_RSS_INDIR_TBL_0;
11265 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11266 tw32(i, 0x0);
11269 /* Turn off gphy autopowerdown. */
11270 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11271 tg3_phy_toggle_apd(tp, false);
11273 if (tg3_flag(tp, CPMU_PRESENT)) {
11274 int i;
11275 u32 status;
11277 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11279 /* Wait for up to 40 microseconds to acquire lock. */
11280 for (i = 0; i < 4; i++) {
11281 status = tr32(TG3_CPMU_MUTEX_GNT);
11282 if (status == CPMU_MUTEX_GNT_DRIVER)
11283 break;
11284 udelay(10);
11287 if (status != CPMU_MUTEX_GNT_DRIVER) {
11288 err = TG3_LOOPBACK_FAILED;
11289 goto done;
11292 /* Turn off link-based power management. */
11293 cpmuctrl = tr32(TG3_CPMU_CTRL);
11294 tw32(TG3_CPMU_CTRL,
11295 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11296 CPMU_CTRL_LINK_AWARE_MODE));
11299 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11300 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11302 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11303 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11304 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11306 if (tg3_flag(tp, CPMU_PRESENT)) {
11307 tw32(TG3_CPMU_CTRL, cpmuctrl);
11309 /* Release the mutex */
11310 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11313 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11314 !tg3_flag(tp, USE_PHYLIB)) {
11315 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11316 err |= TG3_STD_LOOPBACK_FAILED <<
11317 TG3_PHY_LOOPBACK_SHIFT;
11318 if (tg3_flag(tp, TSO_CAPABLE) &&
11319 tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11320 err |= TG3_TSO_LOOPBACK_FAILED <<
11321 TG3_PHY_LOOPBACK_SHIFT;
11322 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11323 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11324 err |= TG3_JMB_LOOPBACK_FAILED <<
11325 TG3_PHY_LOOPBACK_SHIFT;
11328 /* Re-enable gphy autopowerdown. */
11329 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11330 tg3_phy_toggle_apd(tp, true);
11332 done:
11333 tp->phy_flags |= eee_cap;
11335 return err;
11338 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11339 u64 *data)
11341 struct tg3 *tp = netdev_priv(dev);
11343 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11344 tg3_power_up(tp);
11346 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11348 if (tg3_test_nvram(tp) != 0) {
11349 etest->flags |= ETH_TEST_FL_FAILED;
11350 data[0] = 1;
11352 if (tg3_test_link(tp) != 0) {
11353 etest->flags |= ETH_TEST_FL_FAILED;
11354 data[1] = 1;
11356 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11357 int err, err2 = 0, irq_sync = 0;
11359 if (netif_running(dev)) {
11360 tg3_phy_stop(tp);
11361 tg3_netif_stop(tp);
11362 irq_sync = 1;
11365 tg3_full_lock(tp, irq_sync);
11367 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11368 err = tg3_nvram_lock(tp);
11369 tg3_halt_cpu(tp, RX_CPU_BASE);
11370 if (!tg3_flag(tp, 5705_PLUS))
11371 tg3_halt_cpu(tp, TX_CPU_BASE);
11372 if (!err)
11373 tg3_nvram_unlock(tp);
11375 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11376 tg3_phy_reset(tp);
11378 if (tg3_test_registers(tp) != 0) {
11379 etest->flags |= ETH_TEST_FL_FAILED;
11380 data[2] = 1;
11382 if (tg3_test_memory(tp) != 0) {
11383 etest->flags |= ETH_TEST_FL_FAILED;
11384 data[3] = 1;
11386 if ((data[4] = tg3_test_loopback(tp)) != 0)
11387 etest->flags |= ETH_TEST_FL_FAILED;
11389 tg3_full_unlock(tp);
11391 if (tg3_test_interrupt(tp) != 0) {
11392 etest->flags |= ETH_TEST_FL_FAILED;
11393 data[5] = 1;
11396 tg3_full_lock(tp, 0);
11398 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11399 if (netif_running(dev)) {
11400 tg3_flag_set(tp, INIT_COMPLETE);
11401 err2 = tg3_restart_hw(tp, 1);
11402 if (!err2)
11403 tg3_netif_start(tp);
11406 tg3_full_unlock(tp);
11408 if (irq_sync && !err2)
11409 tg3_phy_start(tp);
11410 }
11411 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11412 tg3_power_down(tp);
11414 }
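#if 0
/*
 * Editor's example (user-space, not driver code): how tg3_self_test()
 * is reached through the ETHTOOL_TEST ioctl -- "ethtool -t eth0" does
 * the same.  The interface name is a placeholder and error handling is
 * omitted for brevity.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
	struct ethtool_test *test;
	struct ifreq ifr;
	unsigned int i;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	/* testinfo_len tells us how many u64 results will come back. */
	ifr.ifr_data = (void *)&drvinfo;
	ioctl(fd, SIOCETHTOOL, &ifr);

	test = calloc(1, sizeof(*test) + drvinfo.testinfo_len * sizeof(__u64));
	test->cmd = ETHTOOL_TEST;
	test->flags = ETH_TEST_FL_OFFLINE; /* request the full offline set */

	ifr.ifr_data = (void *)test;
	ioctl(fd, SIOCETHTOOL, &ifr);

	/* For tg3: data[0]=nvram, [1]=link, [2]=registers, [3]=memory,
	 * [4]=loopback bitmap, [5]=interrupt; non-zero means failed. */
	for (i = 0; i < test->len; i++)
		printf("test %u: %llu\n", i, (unsigned long long)test->data[i]);
	free(test);
	return 0;
}
#endif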
11416 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11418 struct mii_ioctl_data *data = if_mii(ifr);
11419 struct tg3 *tp = netdev_priv(dev);
11420 int err;
11422 if (tg3_flag(tp, USE_PHYLIB)) {
11423 struct phy_device *phydev;
11424 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11425 return -EAGAIN;
11426 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11427 return phy_mii_ioctl(phydev, ifr, cmd);
11430 switch (cmd) {
11431 case SIOCGMIIPHY:
11432 data->phy_id = tp->phy_addr;
11434 /* fallthru */
11435 case SIOCGMIIREG: {
11436 u32 mii_regval;
11438 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11439 break; /* We have no PHY */
11441 if (!netif_running(dev))
11442 return -EAGAIN;
11444 spin_lock_bh(&tp->lock);
11445 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11446 spin_unlock_bh(&tp->lock);
11448 data->val_out = mii_regval;
11450 return err;
11453 case SIOCSMIIREG:
11454 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11455 break; /* We have no PHY */
11457 if (!netif_running(dev))
11458 return -EAGAIN;
11460 spin_lock_bh(&tp->lock);
11461 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11462 spin_unlock_bh(&tp->lock);
11464 return err;
11466 default:
11467 /* do nothing */
11468 break;
11469 }
11470 return -EOPNOTSUPP;
11471 }
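#if 0
/*
 * Editor's example (user-space, not driver code): reading a PHY
 * register through the SIOCGMIIPHY/SIOCGMIIREG path handled above.
 * "eth0" is a placeholder; the mii_ioctl_data overlay on the ifreq
 * union is the long-standing mii-tool idiom.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	ioctl(fd, SIOCGMIIPHY, &ifr); /* fills mii->phy_id */
	mii->reg_num = MII_BMSR;
	ioctl(fd, SIOCGMIIREG, &ifr);
	printf("BMSR = 0x%04x\n", mii->val_out);
	return 0;
}
#endif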
11473 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11474 {
11475 struct tg3 *tp = netdev_priv(dev);
11477 memcpy(ec, &tp->coal, sizeof(*ec));
11478 return 0;
11479 }
11481 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11482 {
11483 struct tg3 *tp = netdev_priv(dev);
11484 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11485 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11487 if (!tg3_flag(tp, 5705_PLUS)) {
11488 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11489 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11490 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11491 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11494 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11495 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11496 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11497 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11498 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11499 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11500 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11501 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11502 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11503 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11504 return -EINVAL;
11506 /* No rx interrupts will be generated if both are zero */
11507 if ((ec->rx_coalesce_usecs == 0) &&
11508 (ec->rx_max_coalesced_frames == 0))
11509 return -EINVAL;
11511 /* No tx interrupts will be generated if both are zero */
11512 if ((ec->tx_coalesce_usecs == 0) &&
11513 (ec->tx_max_coalesced_frames == 0))
11514 return -EINVAL;
11516 /* Only copy relevant parameters, ignore all others. */
11517 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11518 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11519 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11520 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11521 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11522 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11523 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11524 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11525 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11527 if (netif_running(dev)) {
11528 tg3_full_lock(tp, 0);
11529 __tg3_set_coalesce(tp, &tp->coal);
11530 tg3_full_unlock(tp);
11531 }
11532 return 0;
11533 }
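/*
 * Editor's note: the two "both zero" rejections above encode one rule
 * per direction -- at least one coalescing trigger (usecs or frames)
 * must be non-zero, or that ring would never raise an interrupt.
 * Hypothetical helper form:
 */
static inline bool tg3_coal_pair_valid(u32 usecs, u32 max_frames)
{
	return usecs != 0 || max_frames != 0;
}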
11535 static const struct ethtool_ops tg3_ethtool_ops = {
11536 .get_settings = tg3_get_settings,
11537 .set_settings = tg3_set_settings,
11538 .get_drvinfo = tg3_get_drvinfo,
11539 .get_regs_len = tg3_get_regs_len,
11540 .get_regs = tg3_get_regs,
11541 .get_wol = tg3_get_wol,
11542 .set_wol = tg3_set_wol,
11543 .get_msglevel = tg3_get_msglevel,
11544 .set_msglevel = tg3_set_msglevel,
11545 .nway_reset = tg3_nway_reset,
11546 .get_link = ethtool_op_get_link,
11547 .get_eeprom_len = tg3_get_eeprom_len,
11548 .get_eeprom = tg3_get_eeprom,
11549 .set_eeprom = tg3_set_eeprom,
11550 .get_ringparam = tg3_get_ringparam,
11551 .set_ringparam = tg3_set_ringparam,
11552 .get_pauseparam = tg3_get_pauseparam,
11553 .set_pauseparam = tg3_set_pauseparam,
11554 .self_test = tg3_self_test,
11555 .get_strings = tg3_get_strings,
11556 .set_phys_id = tg3_set_phys_id,
11557 .get_ethtool_stats = tg3_get_ethtool_stats,
11558 .get_coalesce = tg3_get_coalesce,
11559 .set_coalesce = tg3_set_coalesce,
11560 .get_sset_count = tg3_get_sset_count,
11561 };
11563 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11564 {
11565 u32 cursize, val, magic;
11567 tp->nvram_size = EEPROM_CHIP_SIZE;
11569 if (tg3_nvram_read(tp, 0, &magic) != 0)
11570 return;
11572 if ((magic != TG3_EEPROM_MAGIC) &&
11573 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11574 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11575 return;
11577 /*
11578 * Size the chip by reading offsets at increasing powers of two.
11579 * When we encounter our validation signature, we know the addressing
11580 * has wrapped around, and thus have our chip size.
11581 */
11582 cursize = 0x10;
11584 while (cursize < tp->nvram_size) {
11585 if (tg3_nvram_read(tp, cursize, &val) != 0)
11586 return;
11588 if (val == magic)
11589 break;
11591 cursize <<= 1;
11592 }
11594 tp->nvram_size = cursize;
11595 }
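/*
 * Editor's note: the loop above relies on address wrap-around -- on a
 * part smaller than the probe offset, the read aliases back to offset
 * 0 and returns the magic signature, e.g. a 128 KB EEPROM first
 * re-reads the magic at cursize = 0x20000.  The probe in isolation
 * (read32() is a hypothetical accessor):
 */
static u32 tg3_probe_eeprom_size(u32 (*read32)(u32 addr), u32 magic, u32 limit)
{
	u32 cursize = 0x10;

	while (cursize < limit && read32(cursize) != magic)
		cursize <<= 1;
	return cursize;
}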
11597 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11599 u32 val;
11601 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11602 return;
11604 /* Selfboot format */
11605 if (val != TG3_EEPROM_MAGIC) {
11606 tg3_get_eeprom_size(tp);
11607 return;
11610 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11611 if (val != 0) {
11612 /* This is confusing. We want to operate on the
11613 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11614 * call will read from NVRAM and byteswap the data
11615 * according to the byteswapping settings for all
11616 * other register accesses. This ensures the data we
11617 * want will always reside in the lower 16-bits.
11618 * However, the data in NVRAM is in LE format, which
11619 * means the data from the NVRAM read will always be
11620 * opposite the endianness of the CPU. The 16-bit
11621 * byteswap then brings the data to CPU endianness.
11622 */
11623 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11624 return;
11627 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11630 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11632 u32 nvcfg1;
11634 nvcfg1 = tr32(NVRAM_CFG1);
11635 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11636 tg3_flag_set(tp, FLASH);
11637 } else {
11638 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11639 tw32(NVRAM_CFG1, nvcfg1);
11642 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11643 tg3_flag(tp, 5780_CLASS)) {
11644 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11645 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11646 tp->nvram_jedecnum = JEDEC_ATMEL;
11647 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11648 tg3_flag_set(tp, NVRAM_BUFFERED);
11649 break;
11650 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11651 tp->nvram_jedecnum = JEDEC_ATMEL;
11652 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11653 break;
11654 case FLASH_VENDOR_ATMEL_EEPROM:
11655 tp->nvram_jedecnum = JEDEC_ATMEL;
11656 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11657 tg3_flag_set(tp, NVRAM_BUFFERED);
11658 break;
11659 case FLASH_VENDOR_ST:
11660 tp->nvram_jedecnum = JEDEC_ST;
11661 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11662 tg3_flag_set(tp, NVRAM_BUFFERED);
11663 break;
11664 case FLASH_VENDOR_SAIFUN:
11665 tp->nvram_jedecnum = JEDEC_SAIFUN;
11666 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11667 break;
11668 case FLASH_VENDOR_SST_SMALL:
11669 case FLASH_VENDOR_SST_LARGE:
11670 tp->nvram_jedecnum = JEDEC_SST;
11671 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11672 break;
11674 } else {
11675 tp->nvram_jedecnum = JEDEC_ATMEL;
11676 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11677 tg3_flag_set(tp, NVRAM_BUFFERED);
11681 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11683 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11684 case FLASH_5752PAGE_SIZE_256:
11685 tp->nvram_pagesize = 256;
11686 break;
11687 case FLASH_5752PAGE_SIZE_512:
11688 tp->nvram_pagesize = 512;
11689 break;
11690 case FLASH_5752PAGE_SIZE_1K:
11691 tp->nvram_pagesize = 1024;
11692 break;
11693 case FLASH_5752PAGE_SIZE_2K:
11694 tp->nvram_pagesize = 2048;
11695 break;
11696 case FLASH_5752PAGE_SIZE_4K:
11697 tp->nvram_pagesize = 4096;
11698 break;
11699 case FLASH_5752PAGE_SIZE_264:
11700 tp->nvram_pagesize = 264;
11701 break;
11702 case FLASH_5752PAGE_SIZE_528:
11703 tp->nvram_pagesize = 528;
11704 break;
11708 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11710 u32 nvcfg1;
11712 nvcfg1 = tr32(NVRAM_CFG1);
11714 /* NVRAM protection for TPM */
11715 if (nvcfg1 & (1 << 27))
11716 tg3_flag_set(tp, PROTECTED_NVRAM);
11718 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11719 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11720 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11721 tp->nvram_jedecnum = JEDEC_ATMEL;
11722 tg3_flag_set(tp, NVRAM_BUFFERED);
11723 break;
11724 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11725 tp->nvram_jedecnum = JEDEC_ATMEL;
11726 tg3_flag_set(tp, NVRAM_BUFFERED);
11727 tg3_flag_set(tp, FLASH);
11728 break;
11729 case FLASH_5752VENDOR_ST_M45PE10:
11730 case FLASH_5752VENDOR_ST_M45PE20:
11731 case FLASH_5752VENDOR_ST_M45PE40:
11732 tp->nvram_jedecnum = JEDEC_ST;
11733 tg3_flag_set(tp, NVRAM_BUFFERED);
11734 tg3_flag_set(tp, FLASH);
11735 break;
11738 if (tg3_flag(tp, FLASH)) {
11739 tg3_nvram_get_pagesize(tp, nvcfg1);
11740 } else {
11741 /* For eeprom, set pagesize to maximum eeprom size */
11742 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11744 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11745 tw32(NVRAM_CFG1, nvcfg1);
11749 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11751 u32 nvcfg1, protect = 0;
11753 nvcfg1 = tr32(NVRAM_CFG1);
11755 /* NVRAM protection for TPM */
11756 if (nvcfg1 & (1 << 27)) {
11757 tg3_flag_set(tp, PROTECTED_NVRAM);
11758 protect = 1;
11761 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11762 switch (nvcfg1) {
11763 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11764 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11765 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11766 case FLASH_5755VENDOR_ATMEL_FLASH_5:
11767 tp->nvram_jedecnum = JEDEC_ATMEL;
11768 tg3_flag_set(tp, NVRAM_BUFFERED);
11769 tg3_flag_set(tp, FLASH);
11770 tp->nvram_pagesize = 264;
11771 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11772 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11773 tp->nvram_size = (protect ? 0x3e200 :
11774 TG3_NVRAM_SIZE_512KB);
11775 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11776 tp->nvram_size = (protect ? 0x1f200 :
11777 TG3_NVRAM_SIZE_256KB);
11778 else
11779 tp->nvram_size = (protect ? 0x1f200 :
11780 TG3_NVRAM_SIZE_128KB);
11781 break;
11782 case FLASH_5752VENDOR_ST_M45PE10:
11783 case FLASH_5752VENDOR_ST_M45PE20:
11784 case FLASH_5752VENDOR_ST_M45PE40:
11785 tp->nvram_jedecnum = JEDEC_ST;
11786 tg3_flag_set(tp, NVRAM_BUFFERED);
11787 tg3_flag_set(tp, FLASH);
11788 tp->nvram_pagesize = 256;
11789 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11790 tp->nvram_size = (protect ?
11791 TG3_NVRAM_SIZE_64KB :
11792 TG3_NVRAM_SIZE_128KB);
11793 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11794 tp->nvram_size = (protect ?
11795 TG3_NVRAM_SIZE_64KB :
11796 TG3_NVRAM_SIZE_256KB);
11797 else
11798 tp->nvram_size = (protect ?
11799 TG3_NVRAM_SIZE_128KB :
11800 TG3_NVRAM_SIZE_512KB);
11801 break;
11805 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11807 u32 nvcfg1;
11809 nvcfg1 = tr32(NVRAM_CFG1);
11811 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11812 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11813 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11814 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11815 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11816 tp->nvram_jedecnum = JEDEC_ATMEL;
11817 tg3_flag_set(tp, NVRAM_BUFFERED);
11818 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11820 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11821 tw32(NVRAM_CFG1, nvcfg1);
11822 break;
11823 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11824 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11825 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11826 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11827 tp->nvram_jedecnum = JEDEC_ATMEL;
11828 tg3_flag_set(tp, NVRAM_BUFFERED);
11829 tg3_flag_set(tp, FLASH);
11830 tp->nvram_pagesize = 264;
11831 break;
11832 case FLASH_5752VENDOR_ST_M45PE10:
11833 case FLASH_5752VENDOR_ST_M45PE20:
11834 case FLASH_5752VENDOR_ST_M45PE40:
11835 tp->nvram_jedecnum = JEDEC_ST;
11836 tg3_flag_set(tp, NVRAM_BUFFERED);
11837 tg3_flag_set(tp, FLASH);
11838 tp->nvram_pagesize = 256;
11839 break;
11843 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11845 u32 nvcfg1, protect = 0;
11847 nvcfg1 = tr32(NVRAM_CFG1);
11849 /* NVRAM protection for TPM */
11850 if (nvcfg1 & (1 << 27)) {
11851 tg3_flag_set(tp, PROTECTED_NVRAM);
11852 protect = 1;
11855 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11856 switch (nvcfg1) {
11857 case FLASH_5761VENDOR_ATMEL_ADB021D:
11858 case FLASH_5761VENDOR_ATMEL_ADB041D:
11859 case FLASH_5761VENDOR_ATMEL_ADB081D:
11860 case FLASH_5761VENDOR_ATMEL_ADB161D:
11861 case FLASH_5761VENDOR_ATMEL_MDB021D:
11862 case FLASH_5761VENDOR_ATMEL_MDB041D:
11863 case FLASH_5761VENDOR_ATMEL_MDB081D:
11864 case FLASH_5761VENDOR_ATMEL_MDB161D:
11865 tp->nvram_jedecnum = JEDEC_ATMEL;
11866 tg3_flag_set(tp, NVRAM_BUFFERED);
11867 tg3_flag_set(tp, FLASH);
11868 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
11869 tp->nvram_pagesize = 256;
11870 break;
11871 case FLASH_5761VENDOR_ST_A_M45PE20:
11872 case FLASH_5761VENDOR_ST_A_M45PE40:
11873 case FLASH_5761VENDOR_ST_A_M45PE80:
11874 case FLASH_5761VENDOR_ST_A_M45PE16:
11875 case FLASH_5761VENDOR_ST_M_M45PE20:
11876 case FLASH_5761VENDOR_ST_M_M45PE40:
11877 case FLASH_5761VENDOR_ST_M_M45PE80:
11878 case FLASH_5761VENDOR_ST_M_M45PE16:
11879 tp->nvram_jedecnum = JEDEC_ST;
11880 tg3_flag_set(tp, NVRAM_BUFFERED);
11881 tg3_flag_set(tp, FLASH);
11882 tp->nvram_pagesize = 256;
11883 break;
11886 if (protect) {
11887 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11888 } else {
11889 switch (nvcfg1) {
11890 case FLASH_5761VENDOR_ATMEL_ADB161D:
11891 case FLASH_5761VENDOR_ATMEL_MDB161D:
11892 case FLASH_5761VENDOR_ST_A_M45PE16:
11893 case FLASH_5761VENDOR_ST_M_M45PE16:
11894 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11895 break;
11896 case FLASH_5761VENDOR_ATMEL_ADB081D:
11897 case FLASH_5761VENDOR_ATMEL_MDB081D:
11898 case FLASH_5761VENDOR_ST_A_M45PE80:
11899 case FLASH_5761VENDOR_ST_M_M45PE80:
11900 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11901 break;
11902 case FLASH_5761VENDOR_ATMEL_ADB041D:
11903 case FLASH_5761VENDOR_ATMEL_MDB041D:
11904 case FLASH_5761VENDOR_ST_A_M45PE40:
11905 case FLASH_5761VENDOR_ST_M_M45PE40:
11906 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11907 break;
11908 case FLASH_5761VENDOR_ATMEL_ADB021D:
11909 case FLASH_5761VENDOR_ATMEL_MDB021D:
11910 case FLASH_5761VENDOR_ST_A_M45PE20:
11911 case FLASH_5761VENDOR_ST_M_M45PE20:
11912 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11913 break;
11918 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11920 tp->nvram_jedecnum = JEDEC_ATMEL;
11921 tg3_flag_set(tp, NVRAM_BUFFERED);
11922 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11925 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11927 u32 nvcfg1;
11929 nvcfg1 = tr32(NVRAM_CFG1);
11931 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11932 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11933 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11934 tp->nvram_jedecnum = JEDEC_ATMEL;
11935 tg3_flag_set(tp, NVRAM_BUFFERED);
11936 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11938 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11939 tw32(NVRAM_CFG1, nvcfg1);
11940 return;
11941 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11942 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11943 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11944 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11945 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11946 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11947 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11948 tp->nvram_jedecnum = JEDEC_ATMEL;
11949 tg3_flag_set(tp, NVRAM_BUFFERED);
11950 tg3_flag_set(tp, FLASH);
11952 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11953 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11954 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11955 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11956 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11957 break;
11958 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11959 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11960 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11961 break;
11962 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11963 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11964 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11965 break;
11967 break;
11968 case FLASH_5752VENDOR_ST_M45PE10:
11969 case FLASH_5752VENDOR_ST_M45PE20:
11970 case FLASH_5752VENDOR_ST_M45PE40:
11971 tp->nvram_jedecnum = JEDEC_ST;
11972 tg3_flag_set(tp, NVRAM_BUFFERED);
11973 tg3_flag_set(tp, FLASH);
11975 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11976 case FLASH_5752VENDOR_ST_M45PE10:
11977 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11978 break;
11979 case FLASH_5752VENDOR_ST_M45PE20:
11980 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11981 break;
11982 case FLASH_5752VENDOR_ST_M45PE40:
11983 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11984 break;
11986 break;
11987 default:
11988 tg3_flag_set(tp, NO_NVRAM);
11989 return;
11992 tg3_nvram_get_pagesize(tp, nvcfg1);
11993 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11994 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
11998 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12000 u32 nvcfg1;
12002 nvcfg1 = tr32(NVRAM_CFG1);
12004 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12005 case FLASH_5717VENDOR_ATMEL_EEPROM:
12006 case FLASH_5717VENDOR_MICRO_EEPROM:
12007 tp->nvram_jedecnum = JEDEC_ATMEL;
12008 tg3_flag_set(tp, NVRAM_BUFFERED);
12009 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12011 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12012 tw32(NVRAM_CFG1, nvcfg1);
12013 return;
12014 case FLASH_5717VENDOR_ATMEL_MDB011D:
12015 case FLASH_5717VENDOR_ATMEL_ADB011B:
12016 case FLASH_5717VENDOR_ATMEL_ADB011D:
12017 case FLASH_5717VENDOR_ATMEL_MDB021D:
12018 case FLASH_5717VENDOR_ATMEL_ADB021B:
12019 case FLASH_5717VENDOR_ATMEL_ADB021D:
12020 case FLASH_5717VENDOR_ATMEL_45USPT:
12021 tp->nvram_jedecnum = JEDEC_ATMEL;
12022 tg3_flag_set(tp, NVRAM_BUFFERED);
12023 tg3_flag_set(tp, FLASH);
12025 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12026 case FLASH_5717VENDOR_ATMEL_MDB021D:
12027 /* Detect size with tg3_nvram_get_size() */
12028 break;
12029 case FLASH_5717VENDOR_ATMEL_ADB021B:
12030 case FLASH_5717VENDOR_ATMEL_ADB021D:
12031 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12032 break;
12033 default:
12034 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12035 break;
12037 break;
12038 case FLASH_5717VENDOR_ST_M_M25PE10:
12039 case FLASH_5717VENDOR_ST_A_M25PE10:
12040 case FLASH_5717VENDOR_ST_M_M45PE10:
12041 case FLASH_5717VENDOR_ST_A_M45PE10:
12042 case FLASH_5717VENDOR_ST_M_M25PE20:
12043 case FLASH_5717VENDOR_ST_A_M25PE20:
12044 case FLASH_5717VENDOR_ST_M_M45PE20:
12045 case FLASH_5717VENDOR_ST_A_M45PE20:
12046 case FLASH_5717VENDOR_ST_25USPT:
12047 case FLASH_5717VENDOR_ST_45USPT:
12048 tp->nvram_jedecnum = JEDEC_ST;
12049 tg3_flag_set(tp, NVRAM_BUFFERED);
12050 tg3_flag_set(tp, FLASH);
12052 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12053 case FLASH_5717VENDOR_ST_M_M25PE20:
12054 case FLASH_5717VENDOR_ST_M_M45PE20:
12055 /* Detect size with tg3_nvram_get_size() */
12056 break;
12057 case FLASH_5717VENDOR_ST_A_M25PE20:
12058 case FLASH_5717VENDOR_ST_A_M45PE20:
12059 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12060 break;
12061 default:
12062 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12063 break;
12065 break;
12066 default:
12067 tg3_flag_set(tp, NO_NVRAM);
12068 return;
12071 tg3_nvram_get_pagesize(tp, nvcfg1);
12072 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12073 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12076 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12078 u32 nvcfg1, nvmpinstrp;
12080 nvcfg1 = tr32(NVRAM_CFG1);
12081 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12083 switch (nvmpinstrp) {
12084 case FLASH_5720_EEPROM_HD:
12085 case FLASH_5720_EEPROM_LD:
12086 tp->nvram_jedecnum = JEDEC_ATMEL;
12087 tg3_flag_set(tp, NVRAM_BUFFERED);
12089 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12090 tw32(NVRAM_CFG1, nvcfg1);
12091 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12092 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12093 else
12094 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12095 return;
12096 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12097 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12098 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12099 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12100 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12101 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12102 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12103 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12104 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12105 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12106 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12107 case FLASH_5720VENDOR_ATMEL_45USPT:
12108 tp->nvram_jedecnum = JEDEC_ATMEL;
12109 tg3_flag_set(tp, NVRAM_BUFFERED);
12110 tg3_flag_set(tp, FLASH);
12112 switch (nvmpinstrp) {
12113 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12114 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12115 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12116 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12117 break;
12118 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12119 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12120 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12121 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12122 break;
12123 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12124 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12125 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12126 break;
12127 default:
12128 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12129 break;
12131 break;
12132 case FLASH_5720VENDOR_M_ST_M25PE10:
12133 case FLASH_5720VENDOR_M_ST_M45PE10:
12134 case FLASH_5720VENDOR_A_ST_M25PE10:
12135 case FLASH_5720VENDOR_A_ST_M45PE10:
12136 case FLASH_5720VENDOR_M_ST_M25PE20:
12137 case FLASH_5720VENDOR_M_ST_M45PE20:
12138 case FLASH_5720VENDOR_A_ST_M25PE20:
12139 case FLASH_5720VENDOR_A_ST_M45PE20:
12140 case FLASH_5720VENDOR_M_ST_M25PE40:
12141 case FLASH_5720VENDOR_M_ST_M45PE40:
12142 case FLASH_5720VENDOR_A_ST_M25PE40:
12143 case FLASH_5720VENDOR_A_ST_M45PE40:
12144 case FLASH_5720VENDOR_M_ST_M25PE80:
12145 case FLASH_5720VENDOR_M_ST_M45PE80:
12146 case FLASH_5720VENDOR_A_ST_M25PE80:
12147 case FLASH_5720VENDOR_A_ST_M45PE80:
12148 case FLASH_5720VENDOR_ST_25USPT:
12149 case FLASH_5720VENDOR_ST_45USPT:
12150 tp->nvram_jedecnum = JEDEC_ST;
12151 tg3_flag_set(tp, NVRAM_BUFFERED);
12152 tg3_flag_set(tp, FLASH);
12154 switch (nvmpinstrp) {
12155 case FLASH_5720VENDOR_M_ST_M25PE20:
12156 case FLASH_5720VENDOR_M_ST_M45PE20:
12157 case FLASH_5720VENDOR_A_ST_M25PE20:
12158 case FLASH_5720VENDOR_A_ST_M45PE20:
12159 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12160 break;
12161 case FLASH_5720VENDOR_M_ST_M25PE40:
12162 case FLASH_5720VENDOR_M_ST_M45PE40:
12163 case FLASH_5720VENDOR_A_ST_M25PE40:
12164 case FLASH_5720VENDOR_A_ST_M45PE40:
12165 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12166 break;
12167 case FLASH_5720VENDOR_M_ST_M25PE80:
12168 case FLASH_5720VENDOR_M_ST_M45PE80:
12169 case FLASH_5720VENDOR_A_ST_M25PE80:
12170 case FLASH_5720VENDOR_A_ST_M45PE80:
12171 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12172 break;
12173 default:
12174 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12175 break;
12177 break;
12178 default:
12179 tg3_flag_set(tp, NO_NVRAM);
12180 return;
12183 tg3_nvram_get_pagesize(tp, nvcfg1);
12184 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12185 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12188 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12189 static void __devinit tg3_nvram_init(struct tg3 *tp)
12191 tw32_f(GRC_EEPROM_ADDR,
12192 (EEPROM_ADDR_FSM_RESET |
12193 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12194 EEPROM_ADDR_CLKPERD_SHIFT)));
12196 msleep(1);
12198 /* Enable seeprom accesses. */
12199 tw32_f(GRC_LOCAL_CTRL,
12200 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12201 udelay(100);
12203 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12204 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12205 tg3_flag_set(tp, NVRAM);
12207 if (tg3_nvram_lock(tp)) {
12208 netdev_warn(tp->dev,
12209 "Cannot get nvram lock, %s failed\n",
12210 __func__);
12211 return;
12213 tg3_enable_nvram_access(tp);
12215 tp->nvram_size = 0;
12217 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12218 tg3_get_5752_nvram_info(tp);
12219 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12220 tg3_get_5755_nvram_info(tp);
12221 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12222 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12223 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12224 tg3_get_5787_nvram_info(tp);
12225 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12226 tg3_get_5761_nvram_info(tp);
12227 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12228 tg3_get_5906_nvram_info(tp);
12229 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12230 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12231 tg3_get_57780_nvram_info(tp);
12232 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12233 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12234 tg3_get_5717_nvram_info(tp);
12235 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12236 tg3_get_5720_nvram_info(tp);
12237 else
12238 tg3_get_nvram_info(tp);
12240 if (tp->nvram_size == 0)
12241 tg3_get_nvram_size(tp);
12243 tg3_disable_nvram_access(tp);
12244 tg3_nvram_unlock(tp);
12246 } else {
12247 tg3_flag_clear(tp, NVRAM);
12248 tg3_flag_clear(tp, NVRAM_BUFFERED);
12250 tg3_get_eeprom_size(tp);
12254 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12255 u32 offset, u32 len, u8 *buf)
12257 int i, j, rc = 0;
12258 u32 val;
12260 for (i = 0; i < len; i += 4) {
12261 u32 addr;
12262 __be32 data;
12264 addr = offset + i;
12266 memcpy(&data, buf + i, 4);
12268 /*
12269 * The SEEPROM interface expects the data to always be opposite
12270 * the native endian format. We accomplish this by reversing
12271 * all the operations that would have been performed on the
12272 * data from a call to tg3_nvram_read_be32().
12273 */
12274 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12276 val = tr32(GRC_EEPROM_ADDR);
12277 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12279 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12280 EEPROM_ADDR_READ);
12281 tw32(GRC_EEPROM_ADDR, val |
12282 (0 << EEPROM_ADDR_DEVID_SHIFT) |
12283 (addr & EEPROM_ADDR_ADDR_MASK) |
12284 EEPROM_ADDR_START |
12285 EEPROM_ADDR_WRITE);
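/* Poll for completion: up to 1000 passes with msleep(1), i.e. roughly
 * a one-second timeout per 32-bit word. */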
12287 for (j = 0; j < 1000; j++) {
12288 val = tr32(GRC_EEPROM_ADDR);
12290 if (val & EEPROM_ADDR_COMPLETE)
12291 break;
12292 msleep(1);
12294 if (!(val & EEPROM_ADDR_COMPLETE)) {
12295 rc = -EBUSY;
12296 break;
12300 return rc;
12303 /* offset and length are dword aligned */
12304 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12305 u8 *buf)
12307 int ret = 0;
12308 u32 pagesize = tp->nvram_pagesize;
12309 u32 pagemask = pagesize - 1;
12310 u32 nvram_cmd;
12311 u8 *tmp;
12313 tmp = kmalloc(pagesize, GFP_KERNEL);
12314 if (tmp == NULL)
12315 return -ENOMEM;
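/* Read-modify-write, one flash page at a time: fetch the whole page
 * containing 'offset' into 'tmp', merge in the caller's bytes, erase
 * the page, then program it back dword by dword. */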
12317 while (len) {
12318 int j;
12319 u32 phy_addr, page_off, size;
12321 phy_addr = offset & ~pagemask;
12323 for (j = 0; j < pagesize; j += 4) {
12324 ret = tg3_nvram_read_be32(tp, phy_addr + j,
12325 (__be32 *) (tmp + j));
12326 if (ret)
12327 break;
12329 if (ret)
12330 break;
12332 page_off = offset & pagemask;
12333 size = pagesize;
12334 if (len < size)
12335 size = len;
12337 len -= size;
12339 memcpy(tmp + page_off, buf, size);
12341 offset = offset + (pagesize - page_off);
12343 tg3_enable_nvram_access(tp);
12345 /*
12346 * Before we can erase the flash page, we need
12347 * to issue a special "write enable" command.
12348 */
12349 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12351 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12352 break;
12354 /* Erase the target page */
12355 tw32(NVRAM_ADDR, phy_addr);
12357 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12358 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12360 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12361 break;
12363 /* Issue another write enable to start the write. */
12364 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12366 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12367 break;
12369 for (j = 0; j < pagesize; j += 4) {
12370 __be32 data;
12372 data = *((__be32 *) (tmp + j));
12374 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12376 tw32(NVRAM_ADDR, phy_addr + j);
12378 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12379 NVRAM_CMD_WR;
12381 if (j == 0)
12382 nvram_cmd |= NVRAM_CMD_FIRST;
12383 else if (j == (pagesize - 4))
12384 nvram_cmd |= NVRAM_CMD_LAST;
12386 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12387 break;
12389 if (ret)
12390 break;
12393 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12394 tg3_nvram_exec_cmd(tp, nvram_cmd);
12396 kfree(tmp);
12398 return ret;
12401 /* offset and length are dword aligned */
12402 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12403 u8 *buf)
12405 int i, ret = 0;
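/* Buffered parts are programmed one dword at a time; NVRAM_CMD_FIRST
 * and NVRAM_CMD_LAST bracket each page (and the transfer as a whole),
 * presumably so the controller knows when a page buffer is complete. */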
12407 for (i = 0; i < len; i += 4, offset += 4) {
12408 u32 page_off, phy_addr, nvram_cmd;
12409 __be32 data;
12411 memcpy(&data, buf + i, 4);
12412 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12414 page_off = offset % tp->nvram_pagesize;
12416 phy_addr = tg3_nvram_phys_addr(tp, offset);
12418 tw32(NVRAM_ADDR, phy_addr);
12420 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12422 if (page_off == 0 || i == 0)
12423 nvram_cmd |= NVRAM_CMD_FIRST;
12424 if (page_off == (tp->nvram_pagesize - 4))
12425 nvram_cmd |= NVRAM_CMD_LAST;
12427 if (i == (len - 4))
12428 nvram_cmd |= NVRAM_CMD_LAST;
12430 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12431 !tg3_flag(tp, 5755_PLUS) &&
12432 (tp->nvram_jedecnum == JEDEC_ST) &&
12433 (nvram_cmd & NVRAM_CMD_FIRST)) {
12435 if ((ret = tg3_nvram_exec_cmd(tp,
12436 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12437 NVRAM_CMD_DONE)))
12439 break;
12441 if (!tg3_flag(tp, FLASH)) {
12442 /* We always do complete word writes to eeprom. */
12443 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12446 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12447 break;
12449 return ret;
12452 /* offset and length are dword aligned */
12453 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12455 int ret;
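/* On boards using GPIO1 as eeprom write protect (see
 * tg3_get_eeprom_hw_cfg()), drop OUTPUT1 to deassert the protect pin
 * for the duration of the write, and restore it afterwards. */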
12457 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12458 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12459 ~GRC_LCLCTRL_GPIO_OUTPUT1);
12460 udelay(40);
12463 if (!tg3_flag(tp, NVRAM)) {
12464 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12465 } else {
12466 u32 grc_mode;
12468 ret = tg3_nvram_lock(tp);
12469 if (ret)
12470 return ret;
12472 tg3_enable_nvram_access(tp);
12473 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12474 tw32(NVRAM_WRITE1, 0x406);
12476 grc_mode = tr32(GRC_MODE);
12477 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12479 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12480 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12481 buf);
12482 } else {
12483 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12484 buf);
12487 grc_mode = tr32(GRC_MODE);
12488 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12490 tg3_disable_nvram_access(tp);
12491 tg3_nvram_unlock(tp);
12494 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12495 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12496 udelay(40);
12499 return ret;
12502 struct subsys_tbl_ent {
12503 u16 subsys_vendor, subsys_devid;
12504 u32 phy_id;
12507 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12508 /* Broadcom boards. */
12509 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12510 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12511 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12512 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12513 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12514 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12515 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12516 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12517 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12518 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12519 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12520 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12521 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12522 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12523 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12524 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12525 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12526 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12527 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12528 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12529 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12530 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12532 /* 3com boards. */
12533 { TG3PCI_SUBVENDOR_ID_3COM,
12534 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12535 { TG3PCI_SUBVENDOR_ID_3COM,
12536 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12537 { TG3PCI_SUBVENDOR_ID_3COM,
12538 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12539 { TG3PCI_SUBVENDOR_ID_3COM,
12540 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12541 { TG3PCI_SUBVENDOR_ID_3COM,
12542 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12544 /* DELL boards. */
12545 { TG3PCI_SUBVENDOR_ID_DELL,
12546 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12547 { TG3PCI_SUBVENDOR_ID_DELL,
12548 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12549 { TG3PCI_SUBVENDOR_ID_DELL,
12550 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12551 { TG3PCI_SUBVENDOR_ID_DELL,
12552 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12554 /* Compaq boards. */
12555 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12556 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12557 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12558 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12559 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12560 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12561 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12562 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12563 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12564 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12566 /* IBM boards. */
12567 { TG3PCI_SUBVENDOR_ID_IBM,
12568 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12571 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12573 int i;
12575 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12576 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12577 tp->pdev->subsystem_vendor) &&
12578 (subsys_id_to_phy_id[i].subsys_devid ==
12579 tp->pdev->subsystem_device))
12580 return &subsys_id_to_phy_id[i];
12582 return NULL;
12585 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12587 u32 val;
12588 u16 pmcsr;
12590 /* On some early chips the SRAM cannot be accessed in D3hot state,
12591 * so we need to make sure we're in D0.
12592 */
12593 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
12594 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
12595 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
12596 msleep(1);
12598 /* Make sure register accesses (indirect or otherwise)
12599 * will function correctly.
12600 */
12601 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12602 tp->misc_host_ctrl);
12604 /* The memory arbiter has to be enabled in order for SRAM accesses
12605 * to succeed. Normally on powerup the tg3 chip firmware will make
12606 * sure it is enabled, but other entities such as system netboot
12607 * code might disable it.
12608 */
12609 val = tr32(MEMARB_MODE);
12610 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
12612 tp->phy_id = TG3_PHY_ID_INVALID;
12613 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12615 /* Assume an onboard device and WOL capable by default. */
12616 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12617 tg3_flag_set(tp, WOL_CAP);
12619 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12620 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12621 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12622 tg3_flag_set(tp, IS_NIC);
12624 val = tr32(VCPU_CFGSHDW);
12625 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12626 tg3_flag_set(tp, ASPM_WORKAROUND);
12627 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12628 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12629 tg3_flag_set(tp, WOL_ENABLE);
12630 device_set_wakeup_enable(&tp->pdev->dev, true);
12632 goto done;
12635 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12636 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12637 u32 nic_cfg, led_cfg;
12638 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12639 int eeprom_phy_serdes = 0;
12641 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12642 tp->nic_sram_data_cfg = nic_cfg;
12644 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12645 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12646 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12647 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12648 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
12649 (ver > 0) && (ver < 0x100))
12650 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12652 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12653 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12655 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12656 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12657 eeprom_phy_serdes = 1;
12659 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
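/* Pack the SRAM ID1/ID2 halves into the driver's internal PHY ID
 * layout -- the same packing tg3_phy_probe() builds from the
 * MII_PHYSID1/MII_PHYSID2 registers. */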
12660 if (nic_phy_id != 0) {
12661 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12662 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12664 eeprom_phy_id = (id1 >> 16) << 10;
12665 eeprom_phy_id |= (id2 & 0xfc00) << 16;
12666 eeprom_phy_id |= (id2 & 0x03ff) << 0;
12667 } else
12668 eeprom_phy_id = 0;
12670 tp->phy_id = eeprom_phy_id;
12671 if (eeprom_phy_serdes) {
12672 if (!tg3_flag(tp, 5705_PLUS))
12673 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12674 else
12675 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12678 if (tg3_flag(tp, 5750_PLUS))
12679 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12680 SHASTA_EXT_LED_MODE_MASK);
12681 else
12682 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12684 switch (led_cfg) {
12685 default:
12686 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12687 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12688 break;
12690 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12691 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12692 break;
12694 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12695 tp->led_ctrl = LED_CTRL_MODE_MAC;
12697 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12698 * read on some older 5700/5701 bootcode.
12699 */
12700 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12701 ASIC_REV_5700 ||
12702 GET_ASIC_REV(tp->pci_chip_rev_id) ==
12703 ASIC_REV_5701)
12704 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12706 break;
12708 case SHASTA_EXT_LED_SHARED:
12709 tp->led_ctrl = LED_CTRL_MODE_SHARED;
12710 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12711 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12712 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12713 LED_CTRL_MODE_PHY_2);
12714 break;
12716 case SHASTA_EXT_LED_MAC:
12717 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12718 break;
12720 case SHASTA_EXT_LED_COMBO:
12721 tp->led_ctrl = LED_CTRL_MODE_COMBO;
12722 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12723 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12724 LED_CTRL_MODE_PHY_2);
12725 break;
12729 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12730 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12731 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12732 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12734 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12735 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12737 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12738 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12739 if ((tp->pdev->subsystem_vendor ==
12740 PCI_VENDOR_ID_ARIMA) &&
12741 (tp->pdev->subsystem_device == 0x205a ||
12742 tp->pdev->subsystem_device == 0x2063))
12743 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12744 } else {
12745 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12746 tg3_flag_set(tp, IS_NIC);
12749 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12750 tg3_flag_set(tp, ENABLE_ASF);
12751 if (tg3_flag(tp, 5750_PLUS))
12752 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
12755 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12756 tg3_flag(tp, 5750_PLUS))
12757 tg3_flag_set(tp, ENABLE_APE);
12759 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12760 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12761 tg3_flag_clear(tp, WOL_CAP);
12763 if (tg3_flag(tp, WOL_CAP) &&
12764 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
12765 tg3_flag_set(tp, WOL_ENABLE);
12766 device_set_wakeup_enable(&tp->pdev->dev, true);
12769 if (cfg2 & (1 << 17))
12770 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12772 /* serdes signal pre-emphasis in register 0x590 set by */
12773 /* bootcode if bit 18 is set */
12774 if (cfg2 & (1 << 18))
12775 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12777 if ((tg3_flag(tp, 57765_PLUS) ||
12778 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12779 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12780 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12781 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12783 if (tg3_flag(tp, PCI_EXPRESS) &&
12784 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12785 !tg3_flag(tp, 57765_PLUS)) {
12786 u32 cfg3;
12788 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12789 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12790 tg3_flag_set(tp, ASPM_WORKAROUND);
12793 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12794 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
12795 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12796 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
12797 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12798 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
12800 done:
12801 if (tg3_flag(tp, WOL_CAP))
12802 device_set_wakeup_enable(&tp->pdev->dev,
12803 tg3_flag(tp, WOL_ENABLE));
12804 else
12805 device_set_wakeup_capable(&tp->pdev->dev, false);
12808 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12810 int i;
12811 u32 val;
12813 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12814 tw32(OTP_CTRL, cmd);
12816 /* Wait for up to 1 ms for command to execute. */
12817 for (i = 0; i < 100; i++) {
12818 val = tr32(OTP_STATUS);
12819 if (val & OTP_STATUS_CMD_DONE)
12820 break;
12821 udelay(10);
12824 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12827 /* Read the gphy configuration from the OTP region of the chip. The gphy
12828 * configuration is a 32-bit value that straddles the alignment boundary.
12829 * We do two 32-bit reads and then shift and merge the results.
12830 */
12831 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12833 u32 bhalf_otp, thalf_otp;
12835 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12837 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
12838 return 0;
12840 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12842 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12843 return 0;
12845 thalf_otp = tr32(OTP_READ_DATA);
12847 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12849 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12850 return 0;
12852 bhalf_otp = tr32(OTP_READ_DATA);
12854 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
12857 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
12859 u32 adv = ADVERTISED_Autoneg |
12860 ADVERTISED_Pause;
12862 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12863 adv |= ADVERTISED_1000baseT_Half |
12864 ADVERTISED_1000baseT_Full;
12866 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12867 adv |= ADVERTISED_100baseT_Half |
12868 ADVERTISED_100baseT_Full |
12869 ADVERTISED_10baseT_Half |
12870 ADVERTISED_10baseT_Full |
12871 ADVERTISED_TP;
12872 else
12873 adv |= ADVERTISED_FIBRE;
12875 tp->link_config.advertising = adv;
12876 tp->link_config.speed = SPEED_INVALID;
12877 tp->link_config.duplex = DUPLEX_INVALID;
12878 tp->link_config.autoneg = AUTONEG_ENABLE;
12879 tp->link_config.active_speed = SPEED_INVALID;
12880 tp->link_config.active_duplex = DUPLEX_INVALID;
12881 tp->link_config.orig_speed = SPEED_INVALID;
12882 tp->link_config.orig_duplex = DUPLEX_INVALID;
12883 tp->link_config.orig_autoneg = AUTONEG_INVALID;
12886 static int __devinit tg3_phy_probe(struct tg3 *tp)
12888 u32 hw_phy_id_1, hw_phy_id_2;
12889 u32 hw_phy_id, hw_phy_id_masked;
12890 int err;
12892 /* flow control autonegotiation is default behavior */
12893 tg3_flag_set(tp, PAUSE_AUTONEG);
12894 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
12896 if (tg3_flag(tp, USE_PHYLIB))
12897 return tg3_phy_init(tp);
12899 /* Reading the PHY ID register can conflict with ASF
12900 * firmware access to the PHY hardware.
12901 */
12902 err = 0;
12903 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
12904 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
12905 } else {
12906 /* Now read the physical PHY_ID from the chip and verify
12907 * that it is sane. If it doesn't look good, we fall back
12908 * to the PHY_ID recorded in the eeprom area or, failing that,
12909 * to the hard-coded subsystem-ID table.
12910 */
12911 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
12912 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
12914 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
12915 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
12916 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
12918 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
12921 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
12922 tp->phy_id = hw_phy_id;
12923 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
12924 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12925 else
12926 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
12927 } else {
12928 if (tp->phy_id != TG3_PHY_ID_INVALID) {
12929 /* Do nothing, phy ID already set up in
12930 * tg3_get_eeprom_hw_cfg().
12931 */
12932 } else {
12933 struct subsys_tbl_ent *p;
12935 /* No eeprom signature? Try the hardcoded
12936 * subsys device table.
12937 */
12938 p = tg3_lookup_by_subsys(tp);
12939 if (!p)
12940 return -ENODEV;
12942 tp->phy_id = p->phy_id;
12943 if (!tp->phy_id ||
12944 tp->phy_id == TG3_PHY_ID_BCM8002)
12945 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12949 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12950 ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
12951 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
12952 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12953 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
12954 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
12956 tg3_phy_init_link_config(tp);
12958 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12959 !tg3_flag(tp, ENABLE_APE) &&
12960 !tg3_flag(tp, ENABLE_ASF)) {
12961 u32 bmsr, mask;
12963 tg3_readphy(tp, MII_BMSR, &bmsr);
12964 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
12965 (bmsr & BMSR_LSTATUS))
12966 goto skip_phy_reset;
12968 err = tg3_phy_reset(tp);
12969 if (err)
12970 return err;
12972 tg3_phy_set_wirespeed(tp);
12974 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12975 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12976 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
12977 if (!tg3_copper_is_advertising_all(tp, mask)) {
12978 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
12979 tp->link_config.flowctrl);
12981 tg3_writephy(tp, MII_BMCR,
12982 BMCR_ANENABLE | BMCR_ANRESTART);
12986 skip_phy_reset:
12987 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
12988 err = tg3_init_5401phy_dsp(tp);
12989 if (err)
12990 return err;
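/* Note: the DSP init below is deliberately issued a second time;
 * this appears to act as a retry to make the 5401 settings stick. */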
12992 err = tg3_init_5401phy_dsp(tp);
12995 return err;
12998 static void __devinit tg3_read_vpd(struct tg3 *tp)
13000 u8 *vpd_data;
13001 unsigned int block_end, rosize, len;
13002 int j, i = 0;
13004 vpd_data = (u8 *)tg3_vpd_readblock(tp);
13005 if (!vpd_data)
13006 goto out_no_vpd;
13008 i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
13009 PCI_VPD_LRDT_RO_DATA);
13010 if (i < 0)
13011 goto out_not_found;
13013 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13014 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13015 i += PCI_VPD_LRDT_TAG_SIZE;
13017 if (block_end > TG3_NVM_VPD_LEN)
13018 goto out_not_found;
13020 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13021 PCI_VPD_RO_KEYWORD_MFR_ID);
13022 if (j > 0) {
13023 len = pci_vpd_info_field_size(&vpd_data[j]);
13025 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13026 if (j + len > block_end || len != 4 ||
13027 memcmp(&vpd_data[j], "1028", 4))
13028 goto partno;
13030 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13031 PCI_VPD_RO_KEYWORD_VENDOR0);
13032 if (j < 0)
13033 goto partno;
13035 len = pci_vpd_info_field_size(&vpd_data[j]);
13037 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13038 if (j + len > block_end)
13039 goto partno;
13041 memcpy(tp->fw_ver, &vpd_data[j], len);
13042 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
13045 partno:
13046 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13047 PCI_VPD_RO_KEYWORD_PARTNO);
13048 if (i < 0)
13049 goto out_not_found;
13051 len = pci_vpd_info_field_size(&vpd_data[i]);
13053 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13054 if (len > TG3_BPN_SIZE ||
13055 (len + i) > TG3_NVM_VPD_LEN)
13056 goto out_not_found;
13058 memcpy(tp->board_part_number, &vpd_data[i], len);
13060 out_not_found:
13061 kfree(vpd_data);
13062 if (tp->board_part_number[0])
13063 return;
13065 out_no_vpd:
13066 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13067 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13068 strcpy(tp->board_part_number, "BCM5717");
13069 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13070 strcpy(tp->board_part_number, "BCM5718");
13071 else
13072 goto nomatch;
13073 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13074 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13075 strcpy(tp->board_part_number, "BCM57780");
13076 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13077 strcpy(tp->board_part_number, "BCM57760");
13078 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13079 strcpy(tp->board_part_number, "BCM57790");
13080 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13081 strcpy(tp->board_part_number, "BCM57788");
13082 else
13083 goto nomatch;
13084 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13085 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13086 strcpy(tp->board_part_number, "BCM57761");
13087 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13088 strcpy(tp->board_part_number, "BCM57765");
13089 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13090 strcpy(tp->board_part_number, "BCM57781");
13091 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13092 strcpy(tp->board_part_number, "BCM57785");
13093 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13094 strcpy(tp->board_part_number, "BCM57791");
13095 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13096 strcpy(tp->board_part_number, "BCM57795");
13097 else
13098 goto nomatch;
13099 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13100 strcpy(tp->board_part_number, "BCM95906");
13101 } else {
13102 nomatch:
13103 strcpy(tp->board_part_number, "none");
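/* A firmware image is considered valid when its first word satisfies
 * (val & 0xfc000000) == 0x0c000000 and the following word is zero. */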
13107 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13109 u32 val;
13111 if (tg3_nvram_read(tp, offset, &val) ||
13112 (val & 0xfc000000) != 0x0c000000 ||
13113 tg3_nvram_read(tp, offset + 4, &val) ||
13114 val != 0)
13115 return 0;
13117 return 1;
13120 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13122 u32 val, offset, start, ver_offset;
13123 int i, dst_off;
13124 bool newver = false;
13126 if (tg3_nvram_read(tp, 0xc, &offset) ||
13127 tg3_nvram_read(tp, 0x4, &start))
13128 return;
13130 offset = tg3_nvram_logical_addr(tp, offset);
13132 if (tg3_nvram_read(tp, offset, &val))
13133 return;
13135 if ((val & 0xfc000000) == 0x0c000000) {
13136 if (tg3_nvram_read(tp, offset + 4, &val))
13137 return;
13139 if (val == 0)
13140 newver = true;
13143 dst_off = strlen(tp->fw_ver);
13145 if (newver) {
13146 if (TG3_VER_SIZE - dst_off < 16 ||
13147 tg3_nvram_read(tp, offset + 8, &ver_offset))
13148 return;
13150 offset = offset + ver_offset - start;
13151 for (i = 0; i < 16; i += 4) {
13152 __be32 v;
13153 if (tg3_nvram_read_be32(tp, offset + i, &v))
13154 return;
13156 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13158 } else {
13159 u32 major, minor;
13161 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13162 return;
13164 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13165 TG3_NVM_BCVER_MAJSFT;
13166 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13167 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13168 "v%d.%02d", major, minor);
13172 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13174 u32 val, major, minor;
13176 /* Use native endian representation */
13177 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13178 return;
13180 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13181 TG3_NVM_HWSB_CFG1_MAJSFT;
13182 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13183 TG3_NVM_HWSB_CFG1_MINSFT;
13185 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13188 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13190 u32 offset, major, minor, build;
13192 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13194 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13195 return;
13197 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13198 case TG3_EEPROM_SB_REVISION_0:
13199 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13200 break;
13201 case TG3_EEPROM_SB_REVISION_2:
13202 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13203 break;
13204 case TG3_EEPROM_SB_REVISION_3:
13205 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13206 break;
13207 case TG3_EEPROM_SB_REVISION_4:
13208 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13209 break;
13210 case TG3_EEPROM_SB_REVISION_5:
13211 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13212 break;
13213 case TG3_EEPROM_SB_REVISION_6:
13214 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13215 break;
13216 default:
13217 return;
13220 if (tg3_nvram_read(tp, offset, &val))
13221 return;
13223 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13224 TG3_EEPROM_SB_EDH_BLD_SHFT;
13225 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13226 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13227 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
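/* The minor is printed with two digits and the build is encoded as a
 * single letter 'a'..'z', so values beyond 99/26 cannot be rendered. */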
13229 if (minor > 99 || build > 26)
13230 return;
13232 offset = strlen(tp->fw_ver);
13233 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13234 " v%d.%02d", major, minor);
13236 if (build > 0) {
13237 offset = strlen(tp->fw_ver);
13238 if (offset < TG3_VER_SIZE - 1)
13239 tp->fw_ver[offset] = 'a' + build - 1;
13243 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13245 u32 val, offset, start;
13246 int i, vlen;
13248 for (offset = TG3_NVM_DIR_START;
13249 offset < TG3_NVM_DIR_END;
13250 offset += TG3_NVM_DIRENT_SIZE) {
13251 if (tg3_nvram_read(tp, offset, &val))
13252 return;
13254 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13255 break;
13258 if (offset == TG3_NVM_DIR_END)
13259 return;
13261 if (!tg3_flag(tp, 5705_PLUS))
13262 start = 0x08000000;
13263 else if (tg3_nvram_read(tp, offset - 4, &start))
13264 return;
13266 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13267 !tg3_fw_img_is_valid(tp, offset) ||
13268 tg3_nvram_read(tp, offset + 8, &val))
13269 return;
13271 offset += val - start;
13273 vlen = strlen(tp->fw_ver);
13275 tp->fw_ver[vlen++] = ',';
13276 tp->fw_ver[vlen++] = ' ';
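/* Append up to 16 bytes of the ASF firmware version string,
 * truncating if it would overflow TG3_VER_SIZE. */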
13278 for (i = 0; i < 4; i++) {
13279 __be32 v;
13280 if (tg3_nvram_read_be32(tp, offset, &v))
13281 return;
13283 offset += sizeof(v);
13285 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13286 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13287 break;
13290 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13291 vlen += sizeof(v);
13295 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13297 int vlen;
13298 u32 apedata;
13299 char *fwtype;
13301 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13302 return;
13304 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13305 if (apedata != APE_SEG_SIG_MAGIC)
13306 return;
13308 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13309 if (!(apedata & APE_FW_STATUS_READY))
13310 return;
13312 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13314 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13315 tg3_flag_set(tp, APE_HAS_NCSI);
13316 fwtype = "NCSI";
13317 } else {
13318 fwtype = "DASH";
13321 vlen = strlen(tp->fw_ver);
13323 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13324 fwtype,
13325 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13326 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13327 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13328 (apedata & APE_FW_VERSION_BLDMSK));
13331 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13333 u32 val;
13334 bool vpd_vers = false;
13336 if (tp->fw_ver[0] != 0)
13337 vpd_vers = true;
13339 if (tg3_flag(tp, NO_NVRAM)) {
13340 strcat(tp->fw_ver, "sb");
13341 return;
13344 if (tg3_nvram_read(tp, 0, &val))
13345 return;
13347 if (val == TG3_EEPROM_MAGIC)
13348 tg3_read_bc_ver(tp);
13349 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13350 tg3_read_sb_ver(tp, val);
13351 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13352 tg3_read_hwsb_ver(tp);
13353 else
13354 return;
13356 if (!tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || vpd_vers)
13357 goto done;
13359 tg3_read_mgmtfw_ver(tp);
13361 done:
13362 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13365 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13367 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13369 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13370 return TG3_RX_RET_MAX_SIZE_5717;
13371 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13372 return TG3_RX_RET_MAX_SIZE_5700;
13373 else
13374 return TG3_RX_RET_MAX_SIZE_5705;
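/* Host bridges known to reorder posted writes; matched below in
 * tg3_get_invariants() to set MBOX_WRITE_REORDER. */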
13377 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13378 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13379 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13380 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13381 { },
13384 static int __devinit tg3_get_invariants(struct tg3 *tp)
13386 u32 misc_ctrl_reg;
13387 u32 pci_state_reg, grc_misc_cfg;
13388 u32 val;
13389 u16 pci_cmd;
13390 int err;
13392 /* Force memory write invalidate off. If we leave it on,
13393 * then on 5700_BX chips we have to enable a workaround.
13394 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13395 * to match the cacheline size. The Broadcom driver has this
13396 * workaround but turns MWI off all the time and so never uses
13397 * it. This seems to suggest that the workaround is insufficient.
13398 */
13399 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13400 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13401 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13403 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
13404 * has the register indirect write enable bit set before
13405 * we try to access any of the MMIO registers. It is also
13406 * critical that the PCI-X hw workaround situation is decided
13407 * before that as well.
13408 */
13409 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13410 &misc_ctrl_reg);
13412 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13413 MISC_HOST_CTRL_CHIPREV_SHIFT);
13414 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13415 u32 prod_id_asic_rev;
13417 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13418 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13419 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13420 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13421 pci_read_config_dword(tp->pdev,
13422 TG3PCI_GEN2_PRODID_ASICREV,
13423 &prod_id_asic_rev);
13424 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13425 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13426 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13427 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13428 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13429 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13430 pci_read_config_dword(tp->pdev,
13431 TG3PCI_GEN15_PRODID_ASICREV,
13432 &prod_id_asic_rev);
13433 else
13434 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13435 &prod_id_asic_rev);
13437 tp->pci_chip_rev_id = prod_id_asic_rev;
13440 /* Wrong chip ID in 5752 A0. This code can be removed later
13441 * as A0 is not in production.
13442 */
13443 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13444 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13446 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13447 * we need to disable memory and use config. cycles
13448 * only to access all registers. The 5702/03 chips
13449 * can mistakenly decode the special cycles from the
13450 * ICH chipsets as memory write cycles, causing corruption
13451 * of register and memory space. Only certain ICH bridges
13452 * will drive special cycles with non-zero data during the
13453 * address phase which can fall within the 5703's address
13454 * range. This is not an ICH bug as the PCI spec allows
13455 * non-zero address during special cycles. However, only
13456 * these ICH bridges are known to drive non-zero addresses
13457 * during special cycles.
13459 * Since special cycles do not cross PCI bridges, we only
13460 * enable this workaround if the 5703 is on the secondary
13461 * bus of these ICH bridges.
13462 */
13463 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13464 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13465 static struct tg3_dev_id {
13466 u32 vendor;
13467 u32 device;
13468 u32 rev;
13469 } ich_chipsets[] = {
13470 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13471 PCI_ANY_ID },
13472 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13473 PCI_ANY_ID },
13474 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13475 0xa },
13476 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13477 PCI_ANY_ID },
13478 { },
13480 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13481 struct pci_dev *bridge = NULL;
13483 while (pci_id->vendor != 0) {
13484 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13485 bridge);
13486 if (!bridge) {
13487 pci_id++;
13488 continue;
13490 if (pci_id->rev != PCI_ANY_ID) {
13491 if (bridge->revision > pci_id->rev)
13492 continue;
13494 if (bridge->subordinate &&
13495 (bridge->subordinate->number ==
13496 tp->pdev->bus->number)) {
13497 tg3_flag_set(tp, ICH_WORKAROUND);
13498 pci_dev_put(bridge);
13499 break;
13504 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13505 static struct tg3_dev_id {
13506 u32 vendor;
13507 u32 device;
13508 } bridge_chipsets[] = {
13509 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13510 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13511 { },
13513 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13514 struct pci_dev *bridge = NULL;
13516 while (pci_id->vendor != 0) {
13517 bridge = pci_get_device(pci_id->vendor,
13518 pci_id->device,
13519 bridge);
13520 if (!bridge) {
13521 pci_id++;
13522 continue;
13524 if (bridge->subordinate &&
13525 (bridge->subordinate->number <=
13526 tp->pdev->bus->number) &&
13527 (bridge->subordinate->subordinate >=
13528 tp->pdev->bus->number)) {
13529 tg3_flag_set(tp, 5701_DMA_BUG);
13530 pci_dev_put(bridge);
13531 break;
13536 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13537 * DMA addresses > 40-bit. This bridge may have additional
13538 * 57xx devices behind it in some 4-port NIC designs, for example.
13539 * Any tg3 device found behind the bridge will also need the 40-bit
13540 * DMA workaround.
13541 */
13542 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13543 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13544 tg3_flag_set(tp, 5780_CLASS);
13545 tg3_flag_set(tp, 40BIT_DMA_BUG);
13546 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13547 } else {
13548 struct pci_dev *bridge = NULL;
13550 do {
13551 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13552 PCI_DEVICE_ID_SERVERWORKS_EPB,
13553 bridge);
13554 if (bridge && bridge->subordinate &&
13555 (bridge->subordinate->number <=
13556 tp->pdev->bus->number) &&
13557 (bridge->subordinate->subordinate >=
13558 tp->pdev->bus->number)) {
13559 tg3_flag_set(tp, 40BIT_DMA_BUG);
13560 pci_dev_put(bridge);
13561 break;
13563 } while (bridge);
13566 /* Initialize misc host control in PCI block. */
13567 tp->misc_host_ctrl |= (misc_ctrl_reg &
13568 MISC_HOST_CTRL_CHIPREV);
13569 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13570 tp->misc_host_ctrl);
13572 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13573 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13574 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13575 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13576 tp->pdev_peer = tg3_find_peer(tp);
13578 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13579 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13580 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13581 tg3_flag_set(tp, 5717_PLUS);
13583 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13584 tg3_flag(tp, 5717_PLUS))
13585 tg3_flag_set(tp, 57765_PLUS);
13587 /* Intentionally exclude ASIC_REV_5906 */
13588 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13589 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13590 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13591 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13592 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13593 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13594 tg3_flag(tp, 57765_PLUS))
13595 tg3_flag_set(tp, 5755_PLUS);
13597 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13598 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13599 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13600 tg3_flag(tp, 5755_PLUS) ||
13601 tg3_flag(tp, 5780_CLASS))
13602 tg3_flag_set(tp, 5750_PLUS);
13604 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13605 tg3_flag(tp, 5750_PLUS))
13606 tg3_flag_set(tp, 5705_PLUS);
13608 /* Determine TSO capabilities */
13609 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13610 ; /* Do nothing. HW bug. */
13611 else if (tg3_flag(tp, 57765_PLUS))
13612 tg3_flag_set(tp, HW_TSO_3);
13613 else if (tg3_flag(tp, 5755_PLUS) ||
13614 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13615 tg3_flag_set(tp, HW_TSO_2);
13616 else if (tg3_flag(tp, 5750_PLUS)) {
13617 tg3_flag_set(tp, HW_TSO_1);
13618 tg3_flag_set(tp, TSO_BUG);
13619 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13620 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13621 tg3_flag_clear(tp, TSO_BUG);
13622 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13623 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13624 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13625 tg3_flag_set(tp, TSO_BUG);
13626 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13627 tp->fw_needed = FIRMWARE_TG3TSO5;
13628 else
13629 tp->fw_needed = FIRMWARE_TG3TSO;
13632 /* Selectively allow TSO based on operating conditions */
13633 if (tg3_flag(tp, HW_TSO_1) ||
13634 tg3_flag(tp, HW_TSO_2) ||
13635 tg3_flag(tp, HW_TSO_3) ||
13636 (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13637 tg3_flag_set(tp, TSO_CAPABLE);
13638 else {
13639 tg3_flag_clear(tp, TSO_CAPABLE);
13640 tg3_flag_clear(tp, TSO_BUG);
13641 tp->fw_needed = NULL;
13644 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13645 tp->fw_needed = FIRMWARE_TG3;
13647 tp->irq_max = 1;
13649 if (tg3_flag(tp, 5750_PLUS)) {
13650 tg3_flag_set(tp, SUPPORT_MSI);
13651 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13652 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13653 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13654 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13655 tp->pdev_peer == tp->pdev))
13656 tg3_flag_clear(tp, SUPPORT_MSI);
13658 if (tg3_flag(tp, 5755_PLUS) ||
13659 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13660 tg3_flag_set(tp, 1SHOT_MSI);
13663 if (tg3_flag(tp, 57765_PLUS)) {
13664 tg3_flag_set(tp, SUPPORT_MSIX);
13665 tp->irq_max = TG3_IRQ_MAX_VECS;
13669 /* All chips can get confused if TX buffers
13670 * straddle the 4GB address boundary.
13671 */
13672 tg3_flag_set(tp, 4G_DMA_BNDRY_BUG);
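/* Illustrative sketch only (not the driver's actual test): a mapping
 * at dma_addr of length len crosses the 4GB boundary iff the upper
 * 32 address bits differ at its two ends:
 *   bool cross = ((dma_addr ^ (dma_addr + len - 1)) >> 32) != 0;
 */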
13674 if (tg3_flag(tp, 5755_PLUS))
13675 tg3_flag_set(tp, SHORT_DMA_BUG);
13676 else
13677 tg3_flag_set(tp, 40BIT_DMA_LIMIT_BUG);
13679 if (tg3_flag(tp, 5717_PLUS))
13680 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13682 if (tg3_flag(tp, 57765_PLUS) &&
13683 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13684 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13686 if (!tg3_flag(tp, 5705_PLUS) ||
13687 tg3_flag(tp, 5780_CLASS) ||
13688 tg3_flag(tp, USE_JUMBO_BDFLAG))
13689 tg3_flag_set(tp, JUMBO_CAPABLE);
13691 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13692 &pci_state_reg);
13694 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13695 if (tp->pcie_cap != 0) {
13696 u16 lnkctl;
13698 tg3_flag_set(tp, PCI_EXPRESS);
13700 tp->pcie_readrq = 4096;
13701 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13702 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13703 tp->pcie_readrq = 2048;
13705 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13707 pci_read_config_word(tp->pdev,
13708 tp->pcie_cap + PCI_EXP_LNKCTL,
13709 &lnkctl);
13710 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13711 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13712 ASIC_REV_5906) {
13713 tg3_flag_clear(tp, HW_TSO_2);
13714 tg3_flag_clear(tp, TSO_CAPABLE);
13716 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13717 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13718 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13719 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13720 tg3_flag_set(tp, CLKREQ_BUG);
13721 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13722 tg3_flag_set(tp, L1PLLPD_EN);
13724 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13725 tg3_flag_set(tp, PCI_EXPRESS);
13726 } else if (!tg3_flag(tp, 5705_PLUS) ||
13727 tg3_flag(tp, 5780_CLASS)) {
13728 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13729 if (!tp->pcix_cap) {
13730 dev_err(&tp->pdev->dev,
13731 "Cannot find PCI-X capability, aborting\n");
13732 return -EIO;
13735 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13736 tg3_flag_set(tp, PCIX_MODE);
13739 /* If we have an AMD 762 or VIA K8T800 chipset, write
13740 * reordering to the mailbox registers done by the host
13741 * controller can cause major troubles. We read back from
13742 * every mailbox register write to force the writes to be
13743 * posted to the chip in order.
13744 */
13745 if (pci_dev_present(tg3_write_reorder_chipsets) &&
13746 !tg3_flag(tp, PCI_EXPRESS))
13747 tg3_flag_set(tp, MBOX_WRITE_REORDER);
13749 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13750 &tp->pci_cacheline_sz);
13751 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13752 &tp->pci_lat_timer);
13753 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13754 tp->pci_lat_timer < 64) {
13755 tp->pci_lat_timer = 64;
13756 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13757 tp->pci_lat_timer);
13760 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13761 /* 5700 BX chips need to have their TX producer index
13762 * mailboxes written twice to work around a bug.
13763 */
13764 tg3_flag_set(tp, TXD_MBOX_HWBUG);
13766 /* If we are in PCI-X mode, enable register write workaround.
13767 *
13768 * The workaround is to use indirect register accesses
13769 * for all chip writes not to mailbox registers.
13770 */
13771 if (tg3_flag(tp, PCIX_MODE)) {
13772 u32 pm_reg;
13774 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13776 /* The chip can have its power management PCI config
13777 * space registers clobbered due to this bug.
13778 * So explicitly force the chip into D0 here.
13779 */
13780 pci_read_config_dword(tp->pdev,
13781 tp->pm_cap + PCI_PM_CTRL,
13782 &pm_reg);
13783 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13784 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13785 pci_write_config_dword(tp->pdev,
13786 tp->pm_cap + PCI_PM_CTRL,
13787 pm_reg);
13789 /* Also, force SERR#/PERR# in PCI command. */
13790 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13791 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13792 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13796 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13797 tg3_flag_set(tp, PCI_HIGH_SPEED);
13798 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13799 tg3_flag_set(tp, PCI_32BIT);
13801 /* Chip-specific fixup from Broadcom driver */
13802 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13803 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13804 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13805 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13808 /* Default fast path register access methods */
13809 tp->read32 = tg3_read32;
13810 tp->write32 = tg3_write32;
13811 tp->read32_mbox = tg3_read32;
13812 tp->write32_mbox = tg3_write32;
13813 tp->write32_tx_mbox = tg3_write32;
13814 tp->write32_rx_mbox = tg3_write32;
13816 /* Various workaround register access methods */
13817 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
13818 tp->write32 = tg3_write_indirect_reg32;
13819 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13820 (tg3_flag(tp, PCI_EXPRESS) &&
13821 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13822 /*
13823 * Back to back register writes can cause problems on these
13824 * chips; the workaround is to read back all reg writes
13825 * except those to mailbox regs.
13826 *
13827 * See tg3_write_indirect_reg32().
13828 */
13829 tp->write32 = tg3_write_flush_reg32;
13832 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
13833 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13834 if (tg3_flag(tp, MBOX_WRITE_REORDER))
13835 tp->write32_rx_mbox = tg3_write_flush_reg32;
13838 if (tg3_flag(tp, ICH_WORKAROUND)) {
13839 tp->read32 = tg3_read_indirect_reg32;
13840 tp->write32 = tg3_write_indirect_reg32;
13841 tp->read32_mbox = tg3_read_indirect_mbox;
13842 tp->write32_mbox = tg3_write_indirect_mbox;
13843 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13844 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13846 iounmap(tp->regs);
13847 tp->regs = NULL;
13849 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13850 pci_cmd &= ~PCI_COMMAND_MEMORY;
13851 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13853 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13854 tp->read32_mbox = tg3_read32_mbox_5906;
13855 tp->write32_mbox = tg3_write32_mbox_5906;
13856 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13857 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13860 if (tp->write32 == tg3_write_indirect_reg32 ||
13861 (tg3_flag(tp, PCIX_MODE) &&
13862 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13863 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13864 tg3_flag_set(tp, SRAM_USE_CONFIG);
13866 /* Get eeprom hw config before calling tg3_set_power_state().
13867 * In particular, the TG3_FLAG_IS_NIC flag must be
13868 * determined before calling tg3_set_power_state() so that
13869 * we know whether or not to switch out of Vaux power.
13870 * When the flag is set, it means that GPIO1 is used for eeprom
13871 * write protect and also implies that it is a LOM where GPIOs
13872 * are not used to switch power.
13873 */
13874 tg3_get_eeprom_hw_cfg(tp);
13876 if (tg3_flag(tp, ENABLE_APE)) {
13877 /* Allow reads and writes to the
13878 * APE register and memory space.
13879 */
13880 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13881 PCISTATE_ALLOW_APE_SHMEM_WR |
13882 PCISTATE_ALLOW_APE_PSPACE_WR;
13883 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13884 pci_state_reg);
13887 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13888 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13889 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13890 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13891 tg3_flag(tp, 57765_PLUS))
13892 tg3_flag_set(tp, CPMU_PRESENT);
13894 /* Set up tp->grc_local_ctrl before calling tg3_power_up().
13895 * GPIO1 driven high will bring 5700's external PHY out of reset.
13896 * It is also used as eeprom write protect on LOMs.
13897 */
13898 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13899 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13900 tg3_flag(tp, EEPROM_WRITE_PROT))
13901 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13902 GRC_LCLCTRL_GPIO_OUTPUT1);
13903 /* Unused GPIO3 must be driven as output on 5752 because there
13904 * are no pull-up resistors on unused GPIO pins.
13905 */
13906 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13907 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13909 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13910 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13911 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13912 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13914 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
13915 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
13916 /* Turn off the debug UART. */
13917 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13918 if (tg3_flag(tp, IS_NIC))
13919 /* Keep VMain power. */
13920 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
13921 GRC_LCLCTRL_GPIO_OUTPUT0;
13924 /* Force the chip into D0. */
13925 err = tg3_power_up(tp);
13926 if (err) {
13927 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
13928 return err;
13931 /* Derive initial jumbo mode from MTU assigned in
13932 * ether_setup() via the alloc_etherdev() call.
13933 */
13934 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
13935 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13937 /* Determine WakeOnLan speed to use. */
13938 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13939 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13940 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
13941 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
13942 tg3_flag_clear(tp, WOL_SPEED_100MB);
13943 } else {
13944 tg3_flag_set(tp, WOL_SPEED_100MB);
13947 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13948 tp->phy_flags |= TG3_PHYFLG_IS_FET;
13950 /* A few boards don't want Ethernet@WireSpeed phy feature */
13951 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13952 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13953 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
13954 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
13955 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
13956 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13957 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
13959 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
13960 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
13961 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
13962 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
13963 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
13965 if (tg3_flag(tp, 5705_PLUS) &&
13966 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
13967 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13968 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
13969 !tg3_flag(tp, 57765_PLUS)) {
13970 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13971 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13972 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13973 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
13974 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
13975 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
13976 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
13977 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
13978 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
13979 } else
13980 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
13983 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13984 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
13985 tp->phy_otp = tg3_read_otp_phycfg(tp);
13986 if (tp->phy_otp == 0)
13987 tp->phy_otp = TG3_OTP_DEFAULT;
13990 if (tg3_flag(tp, CPMU_PRESENT))
13991 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
13992 else
13993 tp->mi_mode = MAC_MI_MODE_BASE;
13995 tp->coalesce_mode = 0;
13996 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
13997 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
13998 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14000 /* Set these bits to enable statistics workaround. */
14001 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14002 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14003 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14004 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14005 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14008 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14009 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14010 tg3_flag_set(tp, USE_PHYLIB);
14012 err = tg3_mdio_init(tp);
14013 if (err)
14014 return err;
14016 /* Initialize data/descriptor byte/word swapping. */
14017 val = tr32(GRC_MODE);
14018 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14019 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14020 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14021 GRC_MODE_B2HRX_ENABLE |
14022 GRC_MODE_HTX2B_ENABLE |
14023 GRC_MODE_HOST_STACKUP);
14024 else
14025 val &= GRC_MODE_HOST_STACKUP;
14027 tw32(GRC_MODE, val | tp->grc_mode);
14029 tg3_switch_clocks(tp);
14031 /* Clear this out for sanity. */
14032 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14034 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14035 &pci_state_reg);
14036 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14037 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14038 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14040 if (chiprevid == CHIPREV_ID_5701_A0 ||
14041 chiprevid == CHIPREV_ID_5701_B0 ||
14042 chiprevid == CHIPREV_ID_5701_B2 ||
14043 chiprevid == CHIPREV_ID_5701_B5) {
14044 void __iomem *sram_base;
14046 /* Write some dummy words into the SRAM status block
14047 * area, see if it reads back correctly. If the return
14048 * value is bad, force enable the PCIX workaround.
14049 */
14050 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14052 writel(0x00000000, sram_base);
14053 writel(0x00000000, sram_base + 4);
14054 writel(0xffffffff, sram_base + 4);
14055 if (readl(sram_base) != 0x00000000)
14056 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14057 }
14058 }
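/* Editorial note: the probe above writes zero to both status-block
 * words and then 0xffffffff to the second word only.  On a healthy
 * chip the first word still reads back as zero; if the PCI-X target
 * bug is present, the write to sram_base + 4 corrupts its neighbor,
 * and the non-zero readback forces the workaround flag on.
 */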
14060 udelay(50);
14061 tg3_nvram_init(tp);
14063 grc_misc_cfg = tr32(GRC_MISC_CFG);
14064 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14066 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14067 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14068 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14069 tg3_flag_set(tp, IS_5788);
14071 if (!tg3_flag(tp, IS_5788) &&
14072 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14073 tg3_flag_set(tp, TAGGED_STATUS);
14074 if (tg3_flag(tp, TAGGED_STATUS)) {
14075 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14076 HOSTCC_MODE_CLRTICK_TXBD);
14078 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14079 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14080 tp->misc_host_ctrl);
14081 }
14083 /* Preserve the APE MAC_MODE bits */
14084 if (tg3_flag(tp, ENABLE_APE))
14085 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14086 else
14087 tp->mac_mode = TG3_DEF_MAC_MODE;
14089 /* these are limited to 10/100 only */
14090 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14091 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14092 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14093 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14094 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14095 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14096 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14097 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14098 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14099 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14100 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14101 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14102 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14103 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14104 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14105 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14107 err = tg3_phy_probe(tp);
14108 if (err) {
14109 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14110 /* ... but do not return immediately ... */
14111 tg3_mdio_fini(tp);
14112 }
14114 tg3_read_vpd(tp);
14115 tg3_read_fw_ver(tp);
14117 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14118 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14119 } else {
14120 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14121 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14122 else
14123 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14124 }
14126 /* 5700 {AX,BX} chips have a broken status block link
14127 * change bit implementation, so we must use the
14128 * status register in those cases.
14129 */
14130 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14131 tg3_flag_set(tp, USE_LINKCHG_REG);
14132 else
14133 tg3_flag_clear(tp, USE_LINKCHG_REG);
14135 /* The led_ctrl is set during tg3_phy_probe; here we might
14136 * have to force the link status polling mechanism based
14137 * upon subsystem IDs.
14138 */
14139 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14140 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14141 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14142 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14143 tg3_flag_set(tp, USE_LINKCHG_REG);
14144 }
14146 /* For all SERDES we poll the MAC status register. */
14147 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14148 tg3_flag_set(tp, POLL_SERDES);
14149 else
14150 tg3_flag_clear(tp, POLL_SERDES);
14152 tp->rx_offset = NET_IP_ALIGN;
14153 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14154 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14155 tg3_flag(tp, PCIX_MODE)) {
14156 tp->rx_offset = 0;
14157 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14158 tp->rx_copy_thresh = ~(u16)0;
14159 #endif
14160 }
14162 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14163 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14164 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14166 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14168 /* Increment the rx prod index on the rx std ring by at most
14169 * 8 for these chips to work around hw errata.
14170 */
14171 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14172 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14173 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14174 tp->rx_std_max_post = 8;
14176 if (tg3_flag(tp, ASPM_WORKAROUND))
14177 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14178 PCIE_PWR_MGMT_L1_THRESH_MSK;
14180 return err;
14181 }
14183 #ifdef CONFIG_SPARC
14184 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14185 {
14186 struct net_device *dev = tp->dev;
14187 struct pci_dev *pdev = tp->pdev;
14188 struct device_node *dp = pci_device_to_OF_node(pdev);
14189 const unsigned char *addr;
14190 int len;
14192 addr = of_get_property(dp, "local-mac-address", &len);
14193 if (addr && len == 6) {
14194 memcpy(dev->dev_addr, addr, 6);
14195 memcpy(dev->perm_addr, dev->dev_addr, 6);
14196 return 0;
14197 }
14198 return -ENODEV;
14199 }
14201 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14202 {
14203 struct net_device *dev = tp->dev;
14205 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14206 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14207 return 0;
14208 }
14209 #endif
14211 static int __devinit tg3_get_device_address(struct tg3 *tp)
14212 {
14213 struct net_device *dev = tp->dev;
14214 u32 hi, lo, mac_offset;
14215 int addr_ok = 0;
14217 #ifdef CONFIG_SPARC
14218 if (!tg3_get_macaddr_sparc(tp))
14219 return 0;
14220 #endif
14222 mac_offset = 0x7c;
14223 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14224 tg3_flag(tp, 5780_CLASS)) {
14225 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14226 mac_offset = 0xcc;
14227 if (tg3_nvram_lock(tp))
14228 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14229 else
14230 tg3_nvram_unlock(tp);
14231 } else if (tg3_flag(tp, 5717_PLUS)) {
14232 if (PCI_FUNC(tp->pdev->devfn) & 1)
14233 mac_offset = 0xcc;
14234 if (PCI_FUNC(tp->pdev->devfn) > 1)
14235 mac_offset += 0x18c;
14236 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14237 mac_offset = 0x10;
14239 /* First try to get it from MAC address mailbox. */
14240 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14241 if ((hi >> 16) == 0x484b) {
14242 dev->dev_addr[0] = (hi >> 8) & 0xff;
14243 dev->dev_addr[1] = (hi >> 0) & 0xff;
14245 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14246 dev->dev_addr[2] = (lo >> 24) & 0xff;
14247 dev->dev_addr[3] = (lo >> 16) & 0xff;
14248 dev->dev_addr[4] = (lo >> 8) & 0xff;
14249 dev->dev_addr[5] = (lo >> 0) & 0xff;
14251 /* Some old bootcode may report a 0 MAC address in SRAM */
14252 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14253 }
14254 if (!addr_ok) {
14255 /* Next, try NVRAM. */
14256 if (!tg3_flag(tp, NO_NVRAM) &&
14257 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14258 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14259 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14260 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14261 }
14262 /* Finally just fetch it out of the MAC control regs. */
14263 else {
14264 hi = tr32(MAC_ADDR_0_HIGH);
14265 lo = tr32(MAC_ADDR_0_LOW);
14267 dev->dev_addr[5] = lo & 0xff;
14268 dev->dev_addr[4] = (lo >> 8) & 0xff;
14269 dev->dev_addr[3] = (lo >> 16) & 0xff;
14270 dev->dev_addr[2] = (lo >> 24) & 0xff;
14271 dev->dev_addr[1] = hi & 0xff;
14272 dev->dev_addr[0] = (hi >> 8) & 0xff;
14273 }
14274 }
14276 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14277 #ifdef CONFIG_SPARC
14278 if (!tg3_get_default_macaddr_sparc(tp))
14279 return 0;
14280 #endif
14281 return -EINVAL;
14282 }
14283 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14284 return 0;
14285 }
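/* Editorial sketch (not driver code): how the two mailbox words read
 * above unpack into a 6-byte MAC address once the 0x484b signature is
 * seen in the top half of "hi".  The helper name is hypothetical.
 */
#if 0
static void example_unpack_mac(u32 hi, u32 lo, u8 mac[6])
{
	mac[0] = (hi >>  8) & 0xff;	/* low two bytes of hi carry */
	mac[1] = (hi >>  0) & 0xff;	/* octets 0 and 1 */
	mac[2] = (lo >> 24) & 0xff;	/* lo carries the remaining four */
	mac[3] = (lo >> 16) & 0xff;	/* octets, most significant first */
	mac[4] = (lo >>  8) & 0xff;
	mac[5] = (lo >>  0) & 0xff;
}
#endif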
14287 #define BOUNDARY_SINGLE_CACHELINE 1
14288 #define BOUNDARY_MULTI_CACHELINE 2
14290 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14291 {
14292 int cacheline_size;
14293 u8 byte;
14294 int goal;
14296 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14297 if (byte == 0)
14298 cacheline_size = 1024;
14299 else
14300 cacheline_size = (int) byte * 4;
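/* Editorial note: PCI_CACHE_LINE_SIZE is encoded in units of 32-bit
 * words, hence the multiply by 4 to get bytes (e.g. a register value
 * of 16 means a 64-byte cache line); an unprogrammed value of 0 falls
 * back to 1024 bytes.
 */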
14302 /* On 5703 and later chips, the boundary bits have no
14303 * effect.
14304 */
14305 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14306 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14307 !tg3_flag(tp, PCI_EXPRESS))
14308 goto out;
14310 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14311 goal = BOUNDARY_MULTI_CACHELINE;
14312 #else
14313 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14314 goal = BOUNDARY_SINGLE_CACHELINE;
14315 #else
14316 goal = 0;
14317 #endif
14318 #endif
14320 if (tg3_flag(tp, 57765_PLUS)) {
14321 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14322 goto out;
14323 }
14325 if (!goal)
14326 goto out;
14328 /* PCI controllers on most RISC systems tend to disconnect
14329 * when a device tries to burst across a cache-line boundary.
14330 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14332 * Unfortunately, for PCI-E there are only limited
14333 * write-side controls for this, and thus for reads
14334 * we will still get the disconnects. We'll also waste
14335 * these PCI cycles for both read and write for chips
14336 * other than 5700 and 5701 which do not implement the
14337 * boundary bits.
14338 */
14339 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14340 switch (cacheline_size) {
14341 case 16:
14342 case 32:
14343 case 64:
14344 case 128:
14345 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14346 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14347 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14348 } else {
14349 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14350 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14351 }
14352 break;
14354 case 256:
14355 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14356 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14357 break;
14359 default:
14360 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14361 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14362 break;
14363 }
14364 } else if (tg3_flag(tp, PCI_EXPRESS)) {
14365 switch (cacheline_size) {
14366 case 16:
14367 case 32:
14368 case 64:
14369 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14370 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14371 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14372 break;
14373 }
14374 /* fallthrough */
14375 case 128:
14376 default:
14377 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14378 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14379 break;
14380 }
14381 } else {
14382 switch (cacheline_size) {
14383 case 16:
14384 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14385 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14386 DMA_RWCTRL_WRITE_BNDRY_16);
14387 break;
14388 }
14389 /* fallthrough */
14390 case 32:
14391 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14392 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14393 DMA_RWCTRL_WRITE_BNDRY_32);
14394 break;
14395 }
14396 /* fallthrough */
14397 case 64:
14398 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14399 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14400 DMA_RWCTRL_WRITE_BNDRY_64);
14401 break;
14402 }
14403 /* fallthrough */
14404 case 128:
14405 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14406 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14407 DMA_RWCTRL_WRITE_BNDRY_128);
14408 break;
14409 }
14410 /* fallthrough */
14411 case 256:
14412 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14413 DMA_RWCTRL_WRITE_BNDRY_256);
14414 break;
14415 case 512:
14416 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14417 DMA_RWCTRL_WRITE_BNDRY_512);
14418 break;
14419 case 1024:
14420 default:
14421 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14422 DMA_RWCTRL_WRITE_BNDRY_1024);
14423 break;
14424 }
14425 }
14427 out:
14428 return val;
14429 }
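/* Editorial sketch: the call pattern used by tg3_test_dma() below,
 * condensed.  The seed value and the register write mirror that
 * function; treat this as illustration, not an additional call site.
 */
#if 0
static void example_calc_bndry(struct tg3 *tp)
{
	u32 val = (0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
		  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT);

	val = tg3_calc_dma_bndry(tp, val);	/* merge in boundary bits */
	tw32(TG3PCI_DMA_RW_CTRL, val);
}
#endif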
14431 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14432 {
14433 struct tg3_internal_buffer_desc test_desc;
14434 u32 sram_dma_descs;
14435 int i, ret;
14437 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14439 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14440 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14441 tw32(RDMAC_STATUS, 0);
14442 tw32(WDMAC_STATUS, 0);
14444 tw32(BUFMGR_MODE, 0);
14445 tw32(FTQ_RESET, 0);
14447 test_desc.addr_hi = ((u64) buf_dma) >> 32;
14448 test_desc.addr_lo = buf_dma & 0xffffffff;
14449 test_desc.nic_mbuf = 0x00002100;
14450 test_desc.len = size;
14452 /*
14453 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14454 * the *second* time the tg3 driver was getting loaded after an
14455 * initial scan.
14457 * Broadcom tells me:
14458 * ...the DMA engine is connected to the GRC block and a DMA
14459 * reset may affect the GRC block in some unpredictable way...
14460 * The behavior of resets to individual blocks has not been tested.
14462 * Broadcom noted the GRC reset will also reset all sub-components.
14463 */
14464 if (to_device) {
14465 test_desc.cqid_sqid = (13 << 8) | 2;
14467 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14468 udelay(40);
14469 } else {
14470 test_desc.cqid_sqid = (16 << 8) | 7;
14472 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14473 udelay(40);
14474 }
14475 test_desc.flags = 0x00000005;
14477 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14478 u32 val;
14480 val = *(((u32 *)&test_desc) + i);
14481 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14482 sram_dma_descs + (i * sizeof(u32)));
14483 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14484 }
14485 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14487 if (to_device)
14488 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14489 else
14490 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14492 ret = -ENODEV;
14493 for (i = 0; i < 40; i++) {
14494 u32 val;
14496 if (to_device)
14497 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14498 else
14499 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14500 if ((val & 0xffff) == sram_dma_descs) {
14501 ret = 0;
14502 break;
14503 }
14505 udelay(100);
14506 }
14508 return ret;
14509 }
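/* Editorial note: to_device == 1 exercises the read-DMA engine (the
 * chip pulls the buffer from host memory), to_device == 0 the
 * write-DMA engine.  Completion is detected by polling the matching
 * completion FIFO for the descriptor address, for at most
 * 40 * 100 usecs before giving up with -ENODEV.
 */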
14511 #define TEST_BUFFER_SIZE 0x2000
14513 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14514 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14515 { },
14516 };
14518 static int __devinit tg3_test_dma(struct tg3 *tp)
14519 {
14520 dma_addr_t buf_dma;
14521 u32 *buf, saved_dma_rwctrl;
14522 int ret = 0;
14524 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14525 &buf_dma, GFP_KERNEL);
14526 if (!buf) {
14527 ret = -ENOMEM;
14528 goto out_nofree;
14529 }
14531 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14532 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14534 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14536 if (tg3_flag(tp, 57765_PLUS))
14537 goto out;
14539 if (tg3_flag(tp, PCI_EXPRESS)) {
14540 /* DMA read watermark not used on PCIE */
14541 tp->dma_rwctrl |= 0x00180000;
14542 } else if (!tg3_flag(tp, PCIX_MODE)) {
14543 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14544 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14545 tp->dma_rwctrl |= 0x003f0000;
14546 else
14547 tp->dma_rwctrl |= 0x003f000f;
14548 } else {
14549 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14550 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14551 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14552 u32 read_water = 0x7;
14554 /* If the 5704 is behind the EPB bridge, we can
14555 * do the less restrictive ONE_DMA workaround for
14556 * better performance.
14557 */
14558 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14559 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14560 tp->dma_rwctrl |= 0x8000;
14561 else if (ccval == 0x6 || ccval == 0x7)
14562 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14564 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14565 read_water = 4;
14566 /* Set bit 23 to enable PCIX hw bug fix */
14567 tp->dma_rwctrl |=
14568 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14569 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14570 (1 << 23);
14571 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14572 /* 5780 always in PCIX mode */
14573 tp->dma_rwctrl |= 0x00144000;
14574 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14575 /* 5714 always in PCIX mode */
14576 tp->dma_rwctrl |= 0x00148000;
14577 } else {
14578 tp->dma_rwctrl |= 0x001b000f;
14579 }
14580 }
14582 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14583 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14584 tp->dma_rwctrl &= 0xfffffff0;
14586 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14587 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14588 /* Remove this if it causes problems for some boards. */
14589 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14591 /* On 5700/5701 chips, we need to set this bit.
14592 * Otherwise the chip will issue cacheline transactions
14593 * to streamable DMA memory with not all the byte
14594 * enables turned on. This is an error on several
14595 * RISC PCI controllers, in particular sparc64.
14597 * On 5703/5704 chips, this bit has been reassigned
14598 * a different meaning. In particular, it is used
14599 * on those chips to enable a PCI-X workaround.
14600 */
14601 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14602 }
14604 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14606 #if 0
14607 /* Unneeded, already done by tg3_get_invariants. */
14608 tg3_switch_clocks(tp);
14609 #endif
14611 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14612 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14613 goto out;
14615 /* It is best to perform DMA test with maximum write burst size
14616 * to expose the 5700/5701 write DMA bug.
14617 */
14618 saved_dma_rwctrl = tp->dma_rwctrl;
14619 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14620 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14622 while (1) {
14623 u32 *p = buf, i;
14625 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14626 p[i] = i;
14628 /* Send the buffer to the chip. */
14629 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14630 if (ret) {
14631 dev_err(&tp->pdev->dev,
14632 "%s: Buffer write failed. err = %d\n",
14633 __func__, ret);
14634 break;
14635 }
14637 #if 0
14638 /* validate data reached card RAM correctly. */
14639 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14640 u32 val;
14641 tg3_read_mem(tp, 0x2100 + (i*4), &val);
14642 if (le32_to_cpu(val) != p[i]) {
14643 dev_err(&tp->pdev->dev,
14644 "%s: Buffer corrupted on device! "
14645 "(%d != %d)\n", __func__, val, i);
14646 /* ret = -ENODEV here? */
14647 }
14648 p[i] = 0;
14649 }
14650 #endif
14651 /* Now read it back. */
14652 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14653 if (ret) {
14654 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14655 "err = %d\n", __func__, ret);
14656 break;
14657 }
14659 /* Verify it. */
14660 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14661 if (p[i] == i)
14662 continue;
14664 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14665 DMA_RWCTRL_WRITE_BNDRY_16) {
14666 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14667 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14668 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14669 break;
14670 } else {
14671 dev_err(&tp->pdev->dev,
14672 "%s: Buffer corrupted on read back! "
14673 "(%d != %d)\n", __func__, p[i], i);
14674 ret = -ENODEV;
14675 goto out;
14676 }
14677 }
14679 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14680 /* Success. */
14681 ret = 0;
14682 break;
14683 }
14684 }
14685 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14686 DMA_RWCTRL_WRITE_BNDRY_16) {
14687 /* DMA test passed without adjusting DMA boundary,
14688 * now look for chipsets that are known to expose the
14689 * DMA bug without failing the test.
14690 */
14691 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14692 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14693 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14694 } else {
14695 /* Safe to use the calculated DMA boundary. */
14696 tp->dma_rwctrl = saved_dma_rwctrl;
14697 }
14699 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14700 }
14702 out:
14703 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14704 out_nofree:
14705 return ret;
14706 }
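/* Editorial note: the retry loop above clamps the write boundary to
 * 16 bytes and tries again on the first miscompare; only a second
 * failure is fatal.  The Apple UniNorth entry in
 * tg3_dma_wait_state_chipsets gets the 16-byte clamp even when the
 * test passes, since it is known to expose the bug without failing
 * the test.
 */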
14708 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14709 {
14710 if (tg3_flag(tp, 57765_PLUS)) {
14711 tp->bufmgr_config.mbuf_read_dma_low_water =
14712 DEFAULT_MB_RDMA_LOW_WATER_5705;
14713 tp->bufmgr_config.mbuf_mac_rx_low_water =
14714 DEFAULT_MB_MACRX_LOW_WATER_57765;
14715 tp->bufmgr_config.mbuf_high_water =
14716 DEFAULT_MB_HIGH_WATER_57765;
14718 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14719 DEFAULT_MB_RDMA_LOW_WATER_5705;
14720 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14721 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14722 tp->bufmgr_config.mbuf_high_water_jumbo =
14723 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14724 } else if (tg3_flag(tp, 5705_PLUS)) {
14725 tp->bufmgr_config.mbuf_read_dma_low_water =
14726 DEFAULT_MB_RDMA_LOW_WATER_5705;
14727 tp->bufmgr_config.mbuf_mac_rx_low_water =
14728 DEFAULT_MB_MACRX_LOW_WATER_5705;
14729 tp->bufmgr_config.mbuf_high_water =
14730 DEFAULT_MB_HIGH_WATER_5705;
14731 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14732 tp->bufmgr_config.mbuf_mac_rx_low_water =
14733 DEFAULT_MB_MACRX_LOW_WATER_5906;
14734 tp->bufmgr_config.mbuf_high_water =
14735 DEFAULT_MB_HIGH_WATER_5906;
14736 }
14738 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14739 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14740 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14741 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14742 tp->bufmgr_config.mbuf_high_water_jumbo =
14743 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14744 } else {
14745 tp->bufmgr_config.mbuf_read_dma_low_water =
14746 DEFAULT_MB_RDMA_LOW_WATER;
14747 tp->bufmgr_config.mbuf_mac_rx_low_water =
14748 DEFAULT_MB_MACRX_LOW_WATER;
14749 tp->bufmgr_config.mbuf_high_water =
14750 DEFAULT_MB_HIGH_WATER;
14752 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14753 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14754 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14755 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14756 tp->bufmgr_config.mbuf_high_water_jumbo =
14757 DEFAULT_MB_HIGH_WATER_JUMBO;
14758 }
14760 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14761 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14762 }
14764 static char * __devinit tg3_phy_string(struct tg3 *tp)
14765 {
14766 switch (tp->phy_id & TG3_PHY_ID_MASK) {
14767 case TG3_PHY_ID_BCM5400: return "5400";
14768 case TG3_PHY_ID_BCM5401: return "5401";
14769 case TG3_PHY_ID_BCM5411: return "5411";
14770 case TG3_PHY_ID_BCM5701: return "5701";
14771 case TG3_PHY_ID_BCM5703: return "5703";
14772 case TG3_PHY_ID_BCM5704: return "5704";
14773 case TG3_PHY_ID_BCM5705: return "5705";
14774 case TG3_PHY_ID_BCM5750: return "5750";
14775 case TG3_PHY_ID_BCM5752: return "5752";
14776 case TG3_PHY_ID_BCM5714: return "5714";
14777 case TG3_PHY_ID_BCM5780: return "5780";
14778 case TG3_PHY_ID_BCM5755: return "5755";
14779 case TG3_PHY_ID_BCM5787: return "5787";
14780 case TG3_PHY_ID_BCM5784: return "5784";
14781 case TG3_PHY_ID_BCM5756: return "5722/5756";
14782 case TG3_PHY_ID_BCM5906: return "5906";
14783 case TG3_PHY_ID_BCM5761: return "5761";
14784 case TG3_PHY_ID_BCM5718C: return "5718C";
14785 case TG3_PHY_ID_BCM5718S: return "5718S";
14786 case TG3_PHY_ID_BCM57765: return "57765";
14787 case TG3_PHY_ID_BCM5719C: return "5719C";
14788 case TG3_PHY_ID_BCM5720C: return "5720C";
14789 case TG3_PHY_ID_BCM8002: return "8002/serdes";
14790 case 0: return "serdes";
14791 default: return "unknown";
14792 }
14793 }
14795 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14796 {
14797 if (tg3_flag(tp, PCI_EXPRESS)) {
14798 strcpy(str, "PCI Express");
14799 return str;
14800 } else if (tg3_flag(tp, PCIX_MODE)) {
14801 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14803 strcpy(str, "PCIX:");
14805 if ((clock_ctrl == 7) ||
14806 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14807 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14808 strcat(str, "133MHz");
14809 else if (clock_ctrl == 0)
14810 strcat(str, "33MHz");
14811 else if (clock_ctrl == 2)
14812 strcat(str, "50MHz");
14813 else if (clock_ctrl == 4)
14814 strcat(str, "66MHz");
14815 else if (clock_ctrl == 6)
14816 strcat(str, "100MHz");
14817 } else {
14818 strcpy(str, "PCI:");
14819 if (tg3_flag(tp, PCI_HIGH_SPEED))
14820 strcat(str, "66MHz");
14821 else
14822 strcat(str, "33MHz");
14823 }
14824 if (tg3_flag(tp, PCI_32BIT))
14825 strcat(str, ":32-bit");
14826 else
14827 strcat(str, ":64-bit");
14828 return str;
14829 }
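/* Editorial note: typical results are "PCI Express" (which returns
 * before the width suffix is appended), "PCIX:133MHz:64-bit" or
 * "PCI:33MHz:32-bit".  The caller must supply a buffer large enough
 * for the longest form; tg3_init_one() passes a 40-byte array.
 */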
14831 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14832 {
14833 struct pci_dev *peer;
14834 unsigned int func, devnr = tp->pdev->devfn & ~7;
14836 for (func = 0; func < 8; func++) {
14837 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14838 if (peer && peer != tp->pdev)
14839 break;
14840 pci_dev_put(peer);
14841 }
14842 /* 5704 can be configured in single-port mode; set peer to
14843 * tp->pdev in that case.
14844 */
14845 if (!peer) {
14846 peer = tp->pdev;
14847 return peer;
14848 }
14850 /*
14851 * We don't need to keep the refcount elevated; there's no way
14852 * to remove one half of this device without removing the other
14853 */
14854 pci_dev_put(peer);
14856 return peer;
14857 }
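/* Editorial sketch: devfn packs the device number in bits 7:3 and the
 * function number in bits 2:0, so masking with ~7 selects function 0
 * of the same slot and devnr | func walks all eight functions.  The
 * function below is hypothetical illustration only.
 */
#if 0
static void example_devfn(void)
{
	unsigned int devfn = PCI_DEVFN(4, 1);	/* device 4, function 1 */
	unsigned int devnr = devfn & ~7;	/* device 4, function 0 */

	/* tg3_find_peer() probes devnr | 0 through devnr | 7 */
}
#endif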
14859 static void __devinit tg3_init_coal(struct tg3 *tp)
14860 {
14861 struct ethtool_coalesce *ec = &tp->coal;
14863 memset(ec, 0, sizeof(*ec));
14864 ec->cmd = ETHTOOL_GCOALESCE;
14865 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
14866 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
14867 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
14868 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
14869 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
14870 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
14871 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
14872 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
14873 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
14875 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
14876 HOSTCC_MODE_CLRTICK_TXBD)) {
14877 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
14878 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
14879 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
14880 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
14881 }
14883 if (tg3_flag(tp, 5705_PLUS)) {
14884 ec->rx_coalesce_usecs_irq = 0;
14885 ec->tx_coalesce_usecs_irq = 0;
14886 ec->stats_block_coalesce_usecs = 0;
14887 }
14888 }
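/* Editorial note: these defaults are what "ethtool -c" reports before
 * any tuning; rx_coalesce_usecs maps to rx-usecs,
 * rx_max_coalesced_frames to rx-frames, the *_irq fields to
 * rx-usecs-irq/rx-frames-irq, and stats_block_coalesce_usecs to
 * stats-block-usecs.
 */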
14890 static const struct net_device_ops tg3_netdev_ops = {
14891 .ndo_open = tg3_open,
14892 .ndo_stop = tg3_close,
14893 .ndo_start_xmit = tg3_start_xmit,
14894 .ndo_get_stats64 = tg3_get_stats64,
14895 .ndo_validate_addr = eth_validate_addr,
14896 .ndo_set_multicast_list = tg3_set_rx_mode,
14897 .ndo_set_mac_address = tg3_set_mac_addr,
14898 .ndo_do_ioctl = tg3_ioctl,
14899 .ndo_tx_timeout = tg3_tx_timeout,
14900 .ndo_change_mtu = tg3_change_mtu,
14901 .ndo_fix_features = tg3_fix_features,
14902 .ndo_set_features = tg3_set_features,
14903 #ifdef CONFIG_NET_POLL_CONTROLLER
14904 .ndo_poll_controller = tg3_poll_controller,
14905 #endif
14906 };
14908 static int __devinit tg3_init_one(struct pci_dev *pdev,
14909 const struct pci_device_id *ent)
14910 {
14911 struct net_device *dev;
14912 struct tg3 *tp;
14913 int i, err, pm_cap;
14914 u32 sndmbx, rcvmbx, intmbx;
14915 char str[40];
14916 u64 dma_mask, persist_dma_mask;
14917 u32 features = 0;
14919 printk_once(KERN_INFO "%s\n", version);
14921 err = pci_enable_device(pdev);
14922 if (err) {
14923 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
14924 return err;
14925 }
14927 err = pci_request_regions(pdev, DRV_MODULE_NAME);
14928 if (err) {
14929 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
14930 goto err_out_disable_pdev;
14931 }
14933 pci_set_master(pdev);
14935 /* Find power-management capability. */
14936 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
14937 if (pm_cap == 0) {
14938 dev_err(&pdev->dev,
14939 "Cannot find Power Management capability, aborting\n");
14940 err = -EIO;
14941 goto err_out_free_res;
14942 }
14944 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
14945 if (!dev) {
14946 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
14947 err = -ENOMEM;
14948 goto err_out_free_res;
14949 }
14951 SET_NETDEV_DEV(dev, &pdev->dev);
14953 tp = netdev_priv(dev);
14954 tp->pdev = pdev;
14955 tp->dev = dev;
14956 tp->pm_cap = pm_cap;
14957 tp->rx_mode = TG3_DEF_RX_MODE;
14958 tp->tx_mode = TG3_DEF_TX_MODE;
14960 if (tg3_debug > 0)
14961 tp->msg_enable = tg3_debug;
14962 else
14963 tp->msg_enable = TG3_DEF_MSG_ENABLE;
14965 /* The word/byte swap controls here control register access byte
14966 * swapping. DMA data byte swapping is controlled in the GRC_MODE
14967 * setting below.
14968 */
14969 tp->misc_host_ctrl =
14970 MISC_HOST_CTRL_MASK_PCI_INT |
14971 MISC_HOST_CTRL_WORD_SWAP |
14972 MISC_HOST_CTRL_INDIR_ACCESS |
14973 MISC_HOST_CTRL_PCISTATE_RW;
14975 /* The NONFRM (non-frame) byte/word swap controls take effect
14976 * on descriptor entries, anything which isn't packet data.
14978 * The StrongARM chips on the board (one for tx, one for rx)
14979 * are running in big-endian mode.
14980 */
14981 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
14982 GRC_MODE_WSWAP_NONFRM_DATA);
14983 #ifdef __BIG_ENDIAN
14984 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
14985 #endif
14986 spin_lock_init(&tp->lock);
14987 spin_lock_init(&tp->indirect_lock);
14988 INIT_WORK(&tp->reset_task, tg3_reset_task);
14990 tp->regs = pci_ioremap_bar(pdev, BAR_0);
14991 if (!tp->regs) {
14992 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
14993 err = -ENOMEM;
14994 goto err_out_free_dev;
14995 }
14997 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
14998 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15000 dev->ethtool_ops = &tg3_ethtool_ops;
15001 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15002 dev->netdev_ops = &tg3_netdev_ops;
15003 dev->irq = pdev->irq;
15005 err = tg3_get_invariants(tp);
15006 if (err) {
15007 dev_err(&pdev->dev,
15008 "Problem fetching invariants of chip, aborting\n");
15009 goto err_out_iounmap;
15010 }
15012 /* The EPB bridge inside 5714, 5715, and 5780 and any
15013 * device behind the EPB cannot support DMA addresses > 40-bit.
15014 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15015 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15016 * do DMA address check in tg3_start_xmit().
15017 */
15018 if (tg3_flag(tp, IS_5788))
15019 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15020 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15021 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15022 #ifdef CONFIG_HIGHMEM
15023 dma_mask = DMA_BIT_MASK(64);
15024 #endif
15025 } else
15026 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15028 /* Configure DMA attributes. */
15029 if (dma_mask > DMA_BIT_MASK(32)) {
15030 err = pci_set_dma_mask(pdev, dma_mask);
15031 if (!err) {
15032 features |= NETIF_F_HIGHDMA;
15033 err = pci_set_consistent_dma_mask(pdev,
15034 persist_dma_mask);
15035 if (err < 0) {
15036 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15037 "DMA for consistent allocations\n");
15038 goto err_out_iounmap;
15039 }
15040 }
15041 }
15042 if (err || dma_mask == DMA_BIT_MASK(32)) {
15043 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15044 if (err) {
15045 dev_err(&pdev->dev,
15046 "No usable DMA configuration, aborting\n");
15047 goto err_out_iounmap;
15048 }
15049 }
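/* Editorial note: persist_dma_mask bounds coherent (descriptor ring)
 * allocations while dma_mask bounds streaming packet mappings; on
 * CONFIG_HIGHMEM kernels the 40-bit parts keep a 40-bit coherent
 * mask but advertise 64-bit streaming DMA, relying on the address
 * check in tg3_start_xmit() mentioned above.
 */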
15051 tg3_init_bufmgr_config(tp);
15053 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15055 /* 5700 B0 chips do not support checksumming correctly due
15056 * to hardware bugs.
15057 */
15058 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15059 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15061 if (tg3_flag(tp, 5755_PLUS))
15062 features |= NETIF_F_IPV6_CSUM;
15063 }
15065 /* TSO is on by default on chips that support hardware TSO.
15066 * Firmware TSO on older chips gives lower performance, so it
15067 * is off by default, but can be enabled using ethtool.
15068 */
15069 if ((tg3_flag(tp, HW_TSO_1) ||
15070 tg3_flag(tp, HW_TSO_2) ||
15071 tg3_flag(tp, HW_TSO_3)) &&
15072 (features & NETIF_F_IP_CSUM))
15073 features |= NETIF_F_TSO;
15074 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15075 if (features & NETIF_F_IPV6_CSUM)
15076 features |= NETIF_F_TSO6;
15077 if (tg3_flag(tp, HW_TSO_3) ||
15078 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15079 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15080 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15081 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15082 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15083 features |= NETIF_F_TSO_ECN;
15084 }
15086 dev->features |= features;
15087 dev->vlan_features |= features;
15089 /*
15090 * Add loopback capability only for a subset of devices that support
15091 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15092 * loopback for the remaining devices.
15093 */
15094 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15095 !tg3_flag(tp, CPMU_PRESENT))
15096 /* Add the loopback capability */
15097 features |= NETIF_F_LOOPBACK;
15099 dev->hw_features |= features;
15101 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15102 !tg3_flag(tp, TSO_CAPABLE) &&
15103 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15104 tg3_flag_set(tp, MAX_RXPEND_64);
15105 tp->rx_pending = 63;
15106 }
15108 err = tg3_get_device_address(tp);
15109 if (err) {
15110 dev_err(&pdev->dev,
15111 "Could not obtain valid ethernet address, aborting\n");
15112 goto err_out_iounmap;
15113 }
15115 if (tg3_flag(tp, ENABLE_APE)) {
15116 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15117 if (!tp->aperegs) {
15118 dev_err(&pdev->dev,
15119 "Cannot map APE registers, aborting\n");
15120 err = -ENOMEM;
15121 goto err_out_iounmap;
15122 }
15124 tg3_ape_lock_init(tp);
15126 if (tg3_flag(tp, ENABLE_ASF))
15127 tg3_read_dash_ver(tp);
15128 }
15130 /*
15131 * Reset the chip in case the UNDI or EFI driver did not shut it down;
15132 * otherwise the DMA self test will enable WDMAC and we'll see
15133 * (spurious) pending DMA on the PCI bus at that point.
15134 */
15135 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15136 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15137 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15138 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15139 }
15141 err = tg3_test_dma(tp);
15142 if (err) {
15143 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15144 goto err_out_apeunmap;
15145 }
15147 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15148 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15149 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15150 for (i = 0; i < tp->irq_max; i++) {
15151 struct tg3_napi *tnapi = &tp->napi[i];
15153 tnapi->tp = tp;
15154 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15156 tnapi->int_mbox = intmbx;
15157 if (i < 4)
15158 intmbx += 0x8;
15159 else
15160 intmbx += 0x4;
15162 tnapi->consmbox = rcvmbx;
15163 tnapi->prodmbox = sndmbx;
15165 if (i)
15166 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15167 else
15168 tnapi->coal_now = HOSTCC_MODE_NOW;
15170 if (!tg3_flag(tp, SUPPORT_MSIX))
15171 break;
15173 /*
15174 * If we support MSIX, we'll be using RSS. If we're using
15175 * RSS, the first vector only handles link interrupts and the
15176 * remaining vectors handle rx and tx interrupts. Reuse the
15177 * mailbox values for the next iteration. The values we set up
15178 * above are still useful for the single vectored mode.
15179 */
15180 if (!i)
15181 continue;
15183 rcvmbx += 0x8;
15185 if (sndmbx & 0x4)
15186 sndmbx -= 0x4;
15187 else
15188 sndmbx += 0xc;
15189 }
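/* Editorial note: per the comment above, vector 1 reuses vector 0's
 * rx/tx mailboxes (the "continue" skips the first adjustment); from
 * then on rcvmbx advances by 0x8 per vector, while sndmbx alternates
 * between -0x4 and +0xc so successive producer mailboxes net +0x8
 * every two vectors.
 */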
15191 tg3_init_coal(tp);
15193 pci_set_drvdata(pdev, dev);
15195 err = register_netdev(dev);
15196 if (err) {
15197 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15198 goto err_out_apeunmap;
15199 }
15201 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15202 tp->board_part_number,
15203 tp->pci_chip_rev_id,
15204 tg3_bus_string(tp, str),
15205 dev->dev_addr);
15207 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15208 struct phy_device *phydev;
15209 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15210 netdev_info(dev,
15211 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15212 phydev->drv->name, dev_name(&phydev->dev));
15213 } else {
15214 char *ethtype;
15216 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15217 ethtype = "10/100Base-TX";
15218 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15219 ethtype = "1000Base-SX";
15220 else
15221 ethtype = "10/100/1000Base-T";
15223 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15224 "(WireSpeed[%d], EEE[%d])\n",
15225 tg3_phy_string(tp), ethtype,
15226 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15227 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15228 }
15230 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15231 (dev->features & NETIF_F_RXCSUM) != 0,
15232 tg3_flag(tp, USE_LINKCHG_REG) != 0,
15233 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15234 tg3_flag(tp, ENABLE_ASF) != 0,
15235 tg3_flag(tp, TSO_CAPABLE) != 0);
15236 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15237 tp->dma_rwctrl,
15238 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15239 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15241 pci_save_state(pdev);
15243 return 0;
15245 err_out_apeunmap:
15246 if (tp->aperegs) {
15247 iounmap(tp->aperegs);
15248 tp->aperegs = NULL;
15249 }
15251 err_out_iounmap:
15252 if (tp->regs) {
15253 iounmap(tp->regs);
15254 tp->regs = NULL;
15255 }
15257 err_out_free_dev:
15258 free_netdev(dev);
15260 err_out_free_res:
15261 pci_release_regions(pdev);
15263 err_out_disable_pdev:
15264 pci_disable_device(pdev);
15265 pci_set_drvdata(pdev, NULL);
15266 return err;
15267 }
15269 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15270 {
15271 struct net_device *dev = pci_get_drvdata(pdev);
15273 if (dev) {
15274 struct tg3 *tp = netdev_priv(dev);
15276 if (tp->fw)
15277 release_firmware(tp->fw);
15279 cancel_work_sync(&tp->reset_task);
15281 if (!tg3_flag(tp, USE_PHYLIB)) {
15282 tg3_phy_fini(tp);
15283 tg3_mdio_fini(tp);
15284 }
15286 unregister_netdev(dev);
15287 if (tp->aperegs) {
15288 iounmap(tp->aperegs);
15289 tp->aperegs = NULL;
15290 }
15291 if (tp->regs) {
15292 iounmap(tp->regs);
15293 tp->regs = NULL;
15294 }
15295 free_netdev(dev);
15296 pci_release_regions(pdev);
15297 pci_disable_device(pdev);
15298 pci_set_drvdata(pdev, NULL);
15299 }
15300 }
15302 #ifdef CONFIG_PM_SLEEP
15303 static int tg3_suspend(struct device *device)
15304 {
15305 struct pci_dev *pdev = to_pci_dev(device);
15306 struct net_device *dev = pci_get_drvdata(pdev);
15307 struct tg3 *tp = netdev_priv(dev);
15308 int err;
15310 if (!netif_running(dev))
15311 return 0;
15313 flush_work_sync(&tp->reset_task);
15314 tg3_phy_stop(tp);
15315 tg3_netif_stop(tp);
15317 del_timer_sync(&tp->timer);
15319 tg3_full_lock(tp, 1);
15320 tg3_disable_ints(tp);
15321 tg3_full_unlock(tp);
15323 netif_device_detach(dev);
15325 tg3_full_lock(tp, 0);
15326 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15327 tg3_flag_clear(tp, INIT_COMPLETE);
15328 tg3_full_unlock(tp);
15330 err = tg3_power_down_prepare(tp);
15331 if (err) {
15332 int err2;
15334 tg3_full_lock(tp, 0);
15336 tg3_flag_set(tp, INIT_COMPLETE);
15337 err2 = tg3_restart_hw(tp, 1);
15338 if (err2)
15339 goto out;
15341 tp->timer.expires = jiffies + tp->timer_offset;
15342 add_timer(&tp->timer);
15344 netif_device_attach(dev);
15345 tg3_netif_start(tp);
15347 out:
15348 tg3_full_unlock(tp);
15350 if (!err2)
15351 tg3_phy_start(tp);
15352 }
15354 return err;
15355 }
15357 static int tg3_resume(struct device *device)
15358 {
15359 struct pci_dev *pdev = to_pci_dev(device);
15360 struct net_device *dev = pci_get_drvdata(pdev);
15361 struct tg3 *tp = netdev_priv(dev);
15362 int err;
15364 if (!netif_running(dev))
15365 return 0;
15367 netif_device_attach(dev);
15369 tg3_full_lock(tp, 0);
15371 tg3_flag_set(tp, INIT_COMPLETE);
15372 err = tg3_restart_hw(tp, 1);
15373 if (err)
15374 goto out;
15376 tp->timer.expires = jiffies + tp->timer_offset;
15377 add_timer(&tp->timer);
15379 tg3_netif_start(tp);
15381 out:
15382 tg3_full_unlock(tp);
15384 if (!err)
15385 tg3_phy_start(tp);
15387 return err;
15388 }
15390 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15391 #define TG3_PM_OPS (&tg3_pm_ops)
15393 #else
15395 #define TG3_PM_OPS NULL
15397 #endif /* CONFIG_PM_SLEEP */
15399 /**
15400 * tg3_io_error_detected - called when PCI error is detected
15401 * @pdev: Pointer to PCI device
15402 * @state: The current pci connection state
15404 * This function is called after a PCI bus error affecting
15405 * this device has been detected.
15406 */
15407 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15408 pci_channel_state_t state)
15410 struct net_device *netdev = pci_get_drvdata(pdev);
15411 struct tg3 *tp = netdev_priv(netdev);
15412 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15414 netdev_info(netdev, "PCI I/O error detected\n");
15416 rtnl_lock();
15418 if (!netif_running(netdev))
15419 goto done;
15421 tg3_phy_stop(tp);
15423 tg3_netif_stop(tp);
15425 del_timer_sync(&tp->timer);
15426 tg3_flag_clear(tp, RESTART_TIMER);
15428 /* Want to make sure that the reset task doesn't run */
15429 cancel_work_sync(&tp->reset_task);
15430 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15431 tg3_flag_clear(tp, RESTART_TIMER);
15433 netif_device_detach(netdev);
15435 /* Clean up software state, even if MMIO is blocked */
15436 tg3_full_lock(tp, 0);
15437 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15438 tg3_full_unlock(tp);
15440 done:
15441 if (state == pci_channel_io_perm_failure)
15442 err = PCI_ERS_RESULT_DISCONNECT;
15443 else
15444 pci_disable_device(pdev);
15446 rtnl_unlock();
15448 return err;
15449 }
15451 /**
15452 * tg3_io_slot_reset - called after the pci bus has been reset.
15453 * @pdev: Pointer to PCI device
15455 * Restart the card from scratch, as if from a cold-boot.
15456 * At this point, the card has experienced a hard reset,
15457 * followed by fixups by BIOS, and has its config space
15458 * set up identically to what it was at cold boot.
15459 */
15460 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15462 struct net_device *netdev = pci_get_drvdata(pdev);
15463 struct tg3 *tp = netdev_priv(netdev);
15464 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15465 int err;
15467 rtnl_lock();
15469 if (pci_enable_device(pdev)) {
15470 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15471 goto done;
15472 }
15474 pci_set_master(pdev);
15475 pci_restore_state(pdev);
15476 pci_save_state(pdev);
15478 if (!netif_running(netdev)) {
15479 rc = PCI_ERS_RESULT_RECOVERED;
15480 goto done;
15481 }
15483 err = tg3_power_up(tp);
15484 if (err) {
15485 netdev_err(netdev, "Failed to restore register access.\n");
15486 goto done;
15487 }
15489 rc = PCI_ERS_RESULT_RECOVERED;
15491 done:
15492 rtnl_unlock();
15494 return rc;
15495 }
15497 /**
15498 * tg3_io_resume - called when traffic can start flowing again.
15499 * @pdev: Pointer to PCI device
15501 * This callback is called when the error recovery driver tells
15502 * us that it's OK to resume normal operation.
15503 */
15504 static void tg3_io_resume(struct pci_dev *pdev)
15506 struct net_device *netdev = pci_get_drvdata(pdev);
15507 struct tg3 *tp = netdev_priv(netdev);
15508 int err;
15510 rtnl_lock();
15512 if (!netif_running(netdev))
15513 goto done;
15515 tg3_full_lock(tp, 0);
15516 tg3_flag_set(tp, INIT_COMPLETE);
15517 err = tg3_restart_hw(tp, 1);
15518 tg3_full_unlock(tp);
15519 if (err) {
15520 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15521 goto done;
15522 }
15524 netif_device_attach(netdev);
15526 tp->timer.expires = jiffies + tp->timer_offset;
15527 add_timer(&tp->timer);
15529 tg3_netif_start(tp);
15531 tg3_phy_start(tp);
15533 done:
15534 rtnl_unlock();
15535 }
15537 static struct pci_error_handlers tg3_err_handler = {
15538 .error_detected = tg3_io_error_detected,
15539 .slot_reset = tg3_io_slot_reset,
15540 .resume = tg3_io_resume
15541 };
15543 static struct pci_driver tg3_driver = {
15544 .name = DRV_MODULE_NAME,
15545 .id_table = tg3_pci_tbl,
15546 .probe = tg3_init_one,
15547 .remove = __devexit_p(tg3_remove_one),
15548 .err_handler = &tg3_err_handler,
15549 .driver.pm = TG3_PM_OPS,
15550 };
15552 static int __init tg3_init(void)
15553 {
15554 return pci_register_driver(&tg3_driver);
15555 }
15557 static void __exit tg3_cleanup(void)
15558 {
15559 pci_unregister_driver(&tg3_driver);
15560 }
15562 module_init(tg3_init);
15563 module_exit(tg3_cleanup);