1 /*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2007 Broadcom Corporation.
9 * Firmware is:
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/phy.h>
36 #include <linux/brcmphy.h>
37 #include <linux/if_vlan.h>
38 #include <linux/ip.h>
39 #include <linux/tcp.h>
40 #include <linux/workqueue.h>
41 #include <linux/prefetch.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/firmware.h>
45 #include <net/checksum.h>
46 #include <net/ip.h>
48 #include <asm/system.h>
49 #include <asm/io.h>
50 #include <asm/byteorder.h>
51 #include <asm/uaccess.h>
53 #ifdef CONFIG_SPARC
54 #include <asm/idprom.h>
55 #include <asm/prom.h>
56 #endif
58 #define BAR_0 0
59 #define BAR_2 2
61 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
62 #define TG3_VLAN_TAG_USED 1
63 #else
64 #define TG3_VLAN_TAG_USED 0
65 #endif
67 #include "tg3.h"
69 #define DRV_MODULE_NAME "tg3"
70 #define PFX DRV_MODULE_NAME ": "
71 #define DRV_MODULE_VERSION "3.97"
72 #define DRV_MODULE_RELDATE "December 10, 2008"
74 #define TG3_DEF_MAC_MODE 0
75 #define TG3_DEF_RX_MODE 0
76 #define TG3_DEF_TX_MODE 0
77 #define TG3_DEF_MSG_ENABLE \
78 (NETIF_MSG_DRV | \
79 NETIF_MSG_PROBE | \
80 NETIF_MSG_LINK | \
81 NETIF_MSG_TIMER | \
82 NETIF_MSG_IFDOWN | \
83 NETIF_MSG_IFUP | \
84 NETIF_MSG_RX_ERR | \
85 NETIF_MSG_TX_ERR)
87 /* length of time before we decide the hardware is borked,
88 * and dev->tx_timeout() should be called to fix the problem
90 #define TG3_TX_TIMEOUT (5 * HZ)
92 /* hardware minimum and maximum for a single frame's data payload */
93 #define TG3_MIN_MTU 60
94 #define TG3_MAX_MTU(tp) \
95 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
97 /* These numbers seem to be hard coded in the NIC firmware somehow.
98 * You can't change the ring sizes, but you can change where you place
99 * them in the NIC onboard memory.
101 #define TG3_RX_RING_SIZE 512
102 #define TG3_DEF_RX_RING_PENDING 200
103 #define TG3_RX_JUMBO_RING_SIZE 256
104 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
106 /* Do not place this n-ring entries value into the tp struct itself,
107 * we really want to expose these constants to GCC so that modulo et
108 * al. operations are done with shifts and masks instead of with
109 * hw multiply/modulo instructions. Another solution would be to
110 * replace things like '% foo' with '& (foo - 1)'.
112 #define TG3_RX_RCB_RING_SIZE(tp) \
113 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
115 #define TG3_TX_RING_SIZE 512
116 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
118 #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
119 TG3_RX_RING_SIZE)
120 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
121 TG3_RX_JUMBO_RING_SIZE)
122 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
123 TG3_RX_RCB_RING_SIZE(tp))
124 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
125 TG3_TX_RING_SIZE)
126 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
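/* Note: because TG3_TX_RING_SIZE is a power of two, NEXT_TX() can wrap a
 * ring index with a mask instead of a real modulo, which is exactly the
 * point of the comment above.  A minimal sketch of the idiom (entry is
 * just an illustrative index variable here):
 *
 *	u32 entry = tp->tx_prod;
 *	entry = NEXT_TX(entry);		// behaves like (entry + 1) % 512
 *
 * The shortcut only stays valid while the ring sizes remain powers of two.
 */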
128 #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
129 #define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
131 /* minimum number of free TX descriptors required to wake up TX process */
132 #define TG3_TX_WAKEUP_THRESH(tp) ((tp)->tx_pending / 4)
134 #define TG3_RAW_IP_ALIGN 2
136 /* number of ETHTOOL_GSTATS u64's */
137 #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
139 #define TG3_NUM_TEST 6
141 #define FIRMWARE_TG3 "tigon/tg3.bin"
142 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
143 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
145 static char version[] __devinitdata =
146 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
148 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
149 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
150 MODULE_LICENSE("GPL");
151 MODULE_VERSION(DRV_MODULE_VERSION);
152 MODULE_FIRMWARE(FIRMWARE_TG3);
153 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
154 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
157 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
158 module_param(tg3_debug, int, 0);
159 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
161 static struct pci_device_id tg3_pci_tbl[] = {
162 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
163 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
164 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
165 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
166 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
167 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
168 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
169 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
170 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
171 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
172 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
173 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
174 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
175 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
176 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
177 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
178 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
179 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
180 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
181 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
182 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
183 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
184 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
185 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
186 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
187 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
188 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
189 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
190 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
191 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
192 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
193 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
194 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
195 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
196 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
197 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
198 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
199 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
206 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
207 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
208 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
209 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
210 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
211 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
212 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
213 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
214 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
215 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
216 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
217 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
218 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
219 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
220 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
221 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
222 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
223 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
224 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
225 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
226 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57720)},
227 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
228 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
229 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
230 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
231 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
232 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
233 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
237 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
239 static const struct {
240 const char string[ETH_GSTRING_LEN];
241 } ethtool_stats_keys[TG3_NUM_STATS] = {
242 { "rx_octets" },
243 { "rx_fragments" },
244 { "rx_ucast_packets" },
245 { "rx_mcast_packets" },
246 { "rx_bcast_packets" },
247 { "rx_fcs_errors" },
248 { "rx_align_errors" },
249 { "rx_xon_pause_rcvd" },
250 { "rx_xoff_pause_rcvd" },
251 { "rx_mac_ctrl_rcvd" },
252 { "rx_xoff_entered" },
253 { "rx_frame_too_long_errors" },
254 { "rx_jabbers" },
255 { "rx_undersize_packets" },
256 { "rx_in_length_errors" },
257 { "rx_out_length_errors" },
258 { "rx_64_or_less_octet_packets" },
259 { "rx_65_to_127_octet_packets" },
260 { "rx_128_to_255_octet_packets" },
261 { "rx_256_to_511_octet_packets" },
262 { "rx_512_to_1023_octet_packets" },
263 { "rx_1024_to_1522_octet_packets" },
264 { "rx_1523_to_2047_octet_packets" },
265 { "rx_2048_to_4095_octet_packets" },
266 { "rx_4096_to_8191_octet_packets" },
267 { "rx_8192_to_9022_octet_packets" },
269 { "tx_octets" },
270 { "tx_collisions" },
272 { "tx_xon_sent" },
273 { "tx_xoff_sent" },
274 { "tx_flow_control" },
275 { "tx_mac_errors" },
276 { "tx_single_collisions" },
277 { "tx_mult_collisions" },
278 { "tx_deferred" },
279 { "tx_excessive_collisions" },
280 { "tx_late_collisions" },
281 { "tx_collide_2times" },
282 { "tx_collide_3times" },
283 { "tx_collide_4times" },
284 { "tx_collide_5times" },
285 { "tx_collide_6times" },
286 { "tx_collide_7times" },
287 { "tx_collide_8times" },
288 { "tx_collide_9times" },
289 { "tx_collide_10times" },
290 { "tx_collide_11times" },
291 { "tx_collide_12times" },
292 { "tx_collide_13times" },
293 { "tx_collide_14times" },
294 { "tx_collide_15times" },
295 { "tx_ucast_packets" },
296 { "tx_mcast_packets" },
297 { "tx_bcast_packets" },
298 { "tx_carrier_sense_errors" },
299 { "tx_discards" },
300 { "tx_errors" },
302 { "dma_writeq_full" },
303 { "dma_write_prioq_full" },
304 { "rxbds_empty" },
305 { "rx_discards" },
306 { "rx_errors" },
307 { "rx_threshold_hit" },
309 { "dma_readq_full" },
310 { "dma_read_prioq_full" },
311 { "tx_comp_queue_full" },
313 { "ring_set_send_prod_index" },
314 { "ring_status_update" },
315 { "nic_irqs" },
316 { "nic_avoided_irqs" },
317 { "nic_tx_threshold_hit" }
320 static const struct {
321 const char string[ETH_GSTRING_LEN];
322 } ethtool_test_keys[TG3_NUM_TEST] = {
323 { "nvram test (online) " },
324 { "link test (online) " },
325 { "register test (offline)" },
326 { "memory test (offline)" },
327 { "loopback test (offline)" },
328 { "interrupt test (offline)" },
331 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
333 writel(val, tp->regs + off);
336 static u32 tg3_read32(struct tg3 *tp, u32 off)
338 return (readl(tp->regs + off));
341 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
343 writel(val, tp->aperegs + off);
346 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
348 return (readl(tp->aperegs + off));
351 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
353 unsigned long flags;
355 spin_lock_irqsave(&tp->indirect_lock, flags);
356 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
357 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
358 spin_unlock_irqrestore(&tp->indirect_lock, flags);
361 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
363 writel(val, tp->regs + off);
364 readl(tp->regs + off);
367 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
369 unsigned long flags;
370 u32 val;
372 spin_lock_irqsave(&tp->indirect_lock, flags);
373 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
374 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
375 spin_unlock_irqrestore(&tp->indirect_lock, flags);
376 return val;
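/* Note: the two indirect helpers above avoid memory-mapped I/O entirely.
 * The register offset is staged in the TG3PCI_REG_BASE_ADDR config-space
 * register and the data then moves through TG3PCI_REG_DATA, with
 * indirect_lock held so the address/data pair cannot be interleaved by
 * another context.
 */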
379 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
381 unsigned long flags;
383 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
384 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
385 TG3_64BIT_REG_LOW, val);
386 return;
388 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
389 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
390 TG3_64BIT_REG_LOW, val);
391 return;
394 spin_lock_irqsave(&tp->indirect_lock, flags);
395 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
396 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
397 spin_unlock_irqrestore(&tp->indirect_lock, flags);
399 /* In indirect mode when disabling interrupts, we also need
400 * to clear the interrupt bit in the GRC local ctrl register.
402 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
403 (val == 0x1)) {
404 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
405 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
409 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
411 unsigned long flags;
412 u32 val;
414 spin_lock_irqsave(&tp->indirect_lock, flags);
415 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
416 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
417 spin_unlock_irqrestore(&tp->indirect_lock, flags);
418 return val;
421 /* usec_wait specifies the wait time in usec when writing to certain registers
422 * where it is unsafe to read back the register without some delay.
423 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
424 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
426 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
428 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
429 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
430 /* Non-posted methods */
431 tp->write32(tp, off, val);
432 else {
433 /* Posted method */
434 tg3_write32(tp, off, val);
435 if (usec_wait)
436 udelay(usec_wait);
437 tp->read32(tp, off);
439 /* Wait again after the read for the posted method to guarantee that
440 * the wait time is met.
442 if (usec_wait)
443 udelay(usec_wait);
446 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
448 tp->write32_mbox(tp, off, val);
449 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
450 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
451 tp->read32_mbox(tp, off);
454 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
456 void __iomem *mbox = tp->regs + off;
457 writel(val, mbox);
458 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
459 writel(val, mbox);
460 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
461 readl(mbox);
464 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
466 return (readl(tp->regs + off + GRCMBOX_BASE));
469 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
471 writel(val, tp->regs + off + GRCMBOX_BASE);
474 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
475 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
476 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
477 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
478 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
480 #define tw32(reg,val) tp->write32(tp, reg, val)
481 #define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
482 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
483 #define tr32(reg) tp->read32(tp, reg)
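/* The tw32()/tr32() family above hides which access method a given chip
 * needs (direct MMIO, indirect config cycles, flushed or delayed writes).
 * A rough usage sketch; FOO_REG is a placeholder, not a real tg3 register:
 *
 *	tw32(FOO_REG, val);		// plain write, may stay posted
 *	tw32_f(FOO_REG, val);		// write, then read back to flush
 *	tw32_wait_f(FOO_REG, val, 40);	// flush and wait 40 usec
 *	val = tr32(FOO_REG);		// read
 */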
485 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
487 unsigned long flags;
489 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
490 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
491 return;
493 spin_lock_irqsave(&tp->indirect_lock, flags);
494 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
495 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
496 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
498 /* Always leave this as zero. */
499 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
500 } else {
501 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
502 tw32_f(TG3PCI_MEM_WIN_DATA, val);
504 /* Always leave this as zero. */
505 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
507 spin_unlock_irqrestore(&tp->indirect_lock, flags);
510 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
512 unsigned long flags;
514 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
515 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
516 *val = 0;
517 return;
520 spin_lock_irqsave(&tp->indirect_lock, flags);
521 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
522 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
523 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
525 /* Always leave this as zero. */
526 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
527 } else {
528 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
529 *val = tr32(TG3PCI_MEM_WIN_DATA);
531 /* Always leave this as zero. */
532 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
534 spin_unlock_irqrestore(&tp->indirect_lock, flags);
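/* Note: tg3_write_mem()/tg3_read_mem() reach NIC-local SRAM through a
 * movable window: point TG3PCI_MEM_WIN_BASE_ADDR at the target offset,
 * transfer the word through TG3PCI_MEM_WIN_DATA, then park the window
 * base back at zero so later users see a known window position.
 */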
537 static void tg3_ape_lock_init(struct tg3 *tp)
539 int i;
541 /* Make sure the driver isn't holding any stale locks. */
542 for (i = 0; i < 8; i++)
543 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
544 APE_LOCK_GRANT_DRIVER);
547 static int tg3_ape_lock(struct tg3 *tp, int locknum)
549 int i, off;
550 int ret = 0;
551 u32 status;
553 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
554 return 0;
556 switch (locknum) {
557 case TG3_APE_LOCK_GRC:
558 case TG3_APE_LOCK_MEM:
559 break;
560 default:
561 return -EINVAL;
564 off = 4 * locknum;
566 tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
568 /* Wait for up to 1 millisecond to acquire lock. */
569 for (i = 0; i < 100; i++) {
570 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
571 if (status == APE_LOCK_GRANT_DRIVER)
572 break;
573 udelay(10);
576 if (status != APE_LOCK_GRANT_DRIVER) {
577 /* Revoke the lock request. */
578 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
579 APE_LOCK_GRANT_DRIVER);
581 ret = -EBUSY;
584 return ret;
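/* Note: tg3_ape_lock() claims a hardware semaphore by writing
 * APE_LOCK_REQ_DRIVER and then polling the grant register for roughly
 * 1 ms (100 iterations x 10 usec).  If the grant never reports the
 * driver as owner, the request is revoked and -EBUSY is returned so the
 * caller can back off.
 */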
587 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
589 int off;
591 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
592 return;
594 switch (locknum) {
595 case TG3_APE_LOCK_GRC:
596 case TG3_APE_LOCK_MEM:
597 break;
598 default:
599 return;
602 off = 4 * locknum;
603 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
606 static void tg3_disable_ints(struct tg3 *tp)
608 tw32(TG3PCI_MISC_HOST_CTRL,
609 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
610 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
613 static inline void tg3_cond_int(struct tg3 *tp)
615 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
616 (tp->hw_status->status & SD_STATUS_UPDATED))
617 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
618 else
619 tw32(HOSTCC_MODE, tp->coalesce_mode |
620 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
623 static void tg3_enable_ints(struct tg3 *tp)
625 tp->irq_sync = 0;
626 wmb();
628 tw32(TG3PCI_MISC_HOST_CTRL,
629 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
630 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
631 (tp->last_tag << 24));
632 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
633 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
634 (tp->last_tag << 24));
635 tg3_cond_int(tp);
638 static inline unsigned int tg3_has_work(struct tg3 *tp)
640 struct tg3_hw_status *sblk = tp->hw_status;
641 unsigned int work_exists = 0;
643 /* check for phy events */
644 if (!(tp->tg3_flags &
645 (TG3_FLAG_USE_LINKCHG_REG |
646 TG3_FLAG_POLL_SERDES))) {
647 if (sblk->status & SD_STATUS_LINK_CHG)
648 work_exists = 1;
650 /* check for RX/TX work to do */
651 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
652 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
653 work_exists = 1;
655 return work_exists;
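/* Note: tg3_has_work() looks only at the shared status block: a link
 * change event (unless link state is tracked via polling or the link
 * change register) plus the TX consumer and RX return-ring producer
 * indices.  No device registers are read, keeping the interrupt path
 * cheap.
 */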
658 /* tg3_restart_ints
659 * similar to tg3_enable_ints, but it accurately determines whether there
660 * is new work pending and can return without flushing the PIO write
661 * which reenables interrupts
663 static void tg3_restart_ints(struct tg3 *tp)
665 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
666 tp->last_tag << 24);
667 mmiowb();
669 /* When doing tagged status, this work check is unnecessary.
670 * The last_tag we write above tells the chip which piece of
671 * work we've completed.
673 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
674 tg3_has_work(tp))
675 tw32(HOSTCC_MODE, tp->coalesce_mode |
676 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
679 static inline void tg3_netif_stop(struct tg3 *tp)
681 tp->dev->trans_start = jiffies; /* prevent tx timeout */
682 napi_disable(&tp->napi);
683 netif_tx_disable(tp->dev);
686 static inline void tg3_netif_start(struct tg3 *tp)
688 netif_wake_queue(tp->dev);
689 /* NOTE: unconditional netif_wake_queue is only appropriate
690 * so long as all callers are assured to have free tx slots
691 * (such as after tg3_init_hw)
693 napi_enable(&tp->napi);
694 tp->hw_status->status |= SD_STATUS_UPDATED;
695 tg3_enable_ints(tp);
698 static void tg3_switch_clocks(struct tg3 *tp)
700 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
701 u32 orig_clock_ctrl;
703 if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
704 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
705 return;
707 orig_clock_ctrl = clock_ctrl;
708 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
709 CLOCK_CTRL_CLKRUN_OENABLE |
710 0x1f);
711 tp->pci_clock_ctrl = clock_ctrl;
713 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
714 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
715 tw32_wait_f(TG3PCI_CLOCK_CTRL,
716 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
718 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
719 tw32_wait_f(TG3PCI_CLOCK_CTRL,
720 clock_ctrl |
721 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
722 40);
723 tw32_wait_f(TG3PCI_CLOCK_CTRL,
724 clock_ctrl | (CLOCK_CTRL_ALTCLK),
725 40);
727 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
730 #define PHY_BUSY_LOOPS 5000
732 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
734 u32 frame_val;
735 unsigned int loops;
736 int ret;
738 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
739 tw32_f(MAC_MI_MODE,
740 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
741 udelay(80);
744 *val = 0x0;
746 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
747 MI_COM_PHY_ADDR_MASK);
748 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
749 MI_COM_REG_ADDR_MASK);
750 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
752 tw32_f(MAC_MI_COM, frame_val);
754 loops = PHY_BUSY_LOOPS;
755 while (loops != 0) {
756 udelay(10);
757 frame_val = tr32(MAC_MI_COM);
759 if ((frame_val & MI_COM_BUSY) == 0) {
760 udelay(5);
761 frame_val = tr32(MAC_MI_COM);
762 break;
764 loops -= 1;
767 ret = -EBUSY;
768 if (loops != 0) {
769 *val = frame_val & MI_COM_DATA_MASK;
770 ret = 0;
773 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
774 tw32_f(MAC_MI_MODE, tp->mi_mode);
775 udelay(80);
778 return ret;
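/* Note: tg3_readphy() builds an MI management frame by shifting the PHY
 * and register addresses into MAC_MI_COM together with a READ command,
 * then polls MI_COM_BUSY for up to PHY_BUSY_LOOPS iterations until the
 * cycle completes.  Auto-polling is disabled around the access and
 * restored afterwards so the manual frame is not disturbed;
 * tg3_writephy() below follows the same pattern with a WRITE command.
 */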
781 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
783 u32 frame_val;
784 unsigned int loops;
785 int ret;
787 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
788 (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
789 return 0;
791 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
792 tw32_f(MAC_MI_MODE,
793 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
794 udelay(80);
797 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
798 MI_COM_PHY_ADDR_MASK);
799 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
800 MI_COM_REG_ADDR_MASK);
801 frame_val |= (val & MI_COM_DATA_MASK);
802 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
804 tw32_f(MAC_MI_COM, frame_val);
806 loops = PHY_BUSY_LOOPS;
807 while (loops != 0) {
808 udelay(10);
809 frame_val = tr32(MAC_MI_COM);
810 if ((frame_val & MI_COM_BUSY) == 0) {
811 udelay(5);
812 frame_val = tr32(MAC_MI_COM);
813 break;
815 loops -= 1;
818 ret = -EBUSY;
819 if (loops != 0)
820 ret = 0;
822 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
823 tw32_f(MAC_MI_MODE, tp->mi_mode);
824 udelay(80);
827 return ret;
830 static int tg3_bmcr_reset(struct tg3 *tp)
832 u32 phy_control;
833 int limit, err;
835 /* OK, reset it, and poll the BMCR_RESET bit until it
836 * clears or we time out.
838 phy_control = BMCR_RESET;
839 err = tg3_writephy(tp, MII_BMCR, phy_control);
840 if (err != 0)
841 return -EBUSY;
843 limit = 5000;
844 while (limit--) {
845 err = tg3_readphy(tp, MII_BMCR, &phy_control);
846 if (err != 0)
847 return -EBUSY;
849 if ((phy_control & BMCR_RESET) == 0) {
850 udelay(40);
851 break;
853 udelay(10);
855 if (limit <= 0)
856 return -EBUSY;
858 return 0;
861 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
863 struct tg3 *tp = (struct tg3 *)bp->priv;
864 u32 val;
866 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
867 return -EAGAIN;
869 if (tg3_readphy(tp, reg, &val))
870 return -EIO;
872 return val;
875 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
877 struct tg3 *tp = (struct tg3 *)bp->priv;
879 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
880 return -EAGAIN;
882 if (tg3_writephy(tp, reg, val))
883 return -EIO;
885 return 0;
888 static int tg3_mdio_reset(struct mii_bus *bp)
890 return 0;
893 static void tg3_mdio_config_5785(struct tg3 *tp)
895 u32 val;
896 struct phy_device *phydev;
898 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
899 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
900 case TG3_PHY_ID_BCM50610:
901 val = MAC_PHYCFG2_50610_LED_MODES;
902 break;
903 case TG3_PHY_ID_BCMAC131:
904 val = MAC_PHYCFG2_AC131_LED_MODES;
905 break;
906 case TG3_PHY_ID_RTL8211C:
907 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
908 break;
909 case TG3_PHY_ID_RTL8201E:
910 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
911 break;
912 default:
913 return;
916 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
917 tw32(MAC_PHYCFG2, val);
919 val = tr32(MAC_PHYCFG1);
920 val &= ~MAC_PHYCFG1_RGMII_INT;
921 tw32(MAC_PHYCFG1, val);
923 return;
926 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
927 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
928 MAC_PHYCFG2_FMODE_MASK_MASK |
929 MAC_PHYCFG2_GMODE_MASK_MASK |
930 MAC_PHYCFG2_ACT_MASK_MASK |
931 MAC_PHYCFG2_QUAL_MASK_MASK |
932 MAC_PHYCFG2_INBAND_ENABLE;
934 tw32(MAC_PHYCFG2, val);
936 val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
937 MAC_PHYCFG1_RGMII_SND_STAT_EN);
938 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
939 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
940 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
941 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
942 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
944 tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);
946 val = tr32(MAC_EXT_RGMII_MODE);
947 val &= ~(MAC_RGMII_MODE_RX_INT_B |
948 MAC_RGMII_MODE_RX_QUALITY |
949 MAC_RGMII_MODE_RX_ACTIVITY |
950 MAC_RGMII_MODE_RX_ENG_DET |
951 MAC_RGMII_MODE_TX_ENABLE |
952 MAC_RGMII_MODE_TX_LOWPWR |
953 MAC_RGMII_MODE_TX_RESET);
954 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
955 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
956 val |= MAC_RGMII_MODE_RX_INT_B |
957 MAC_RGMII_MODE_RX_QUALITY |
958 MAC_RGMII_MODE_RX_ACTIVITY |
959 MAC_RGMII_MODE_RX_ENG_DET;
960 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
961 val |= MAC_RGMII_MODE_TX_ENABLE |
962 MAC_RGMII_MODE_TX_LOWPWR |
963 MAC_RGMII_MODE_TX_RESET;
965 tw32(MAC_EXT_RGMII_MODE, val);
968 static void tg3_mdio_start(struct tg3 *tp)
970 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
971 mutex_lock(&tp->mdio_bus->mdio_lock);
972 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
973 mutex_unlock(&tp->mdio_bus->mdio_lock);
976 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
977 tw32_f(MAC_MI_MODE, tp->mi_mode);
978 udelay(80);
980 if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
981 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
982 tg3_mdio_config_5785(tp);
985 static void tg3_mdio_stop(struct tg3 *tp)
987 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
988 mutex_lock(&tp->mdio_bus->mdio_lock);
989 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
990 mutex_unlock(&tp->mdio_bus->mdio_lock);
994 static int tg3_mdio_init(struct tg3 *tp)
996 int i;
997 u32 reg;
998 struct phy_device *phydev;
1000 tg3_mdio_start(tp);
1002 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
1003 (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
1004 return 0;
1006 tp->mdio_bus = mdiobus_alloc();
1007 if (tp->mdio_bus == NULL)
1008 return -ENOMEM;
1010 tp->mdio_bus->name = "tg3 mdio bus";
1011 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1012 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1013 tp->mdio_bus->priv = tp;
1014 tp->mdio_bus->parent = &tp->pdev->dev;
1015 tp->mdio_bus->read = &tg3_mdio_read;
1016 tp->mdio_bus->write = &tg3_mdio_write;
1017 tp->mdio_bus->reset = &tg3_mdio_reset;
1018 tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
1019 tp->mdio_bus->irq = &tp->mdio_irq[0];
1021 for (i = 0; i < PHY_MAX_ADDR; i++)
1022 tp->mdio_bus->irq[i] = PHY_POLL;
1024 /* The bus registration will look for all the PHYs on the mdio bus.
1025 * Unfortunately, it does not ensure the PHY is powered up before
1026 * accessing the PHY ID registers. A chip reset is the
1027 * quickest way to bring the device back to an operational state.
1029 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1030 tg3_bmcr_reset(tp);
1032 i = mdiobus_register(tp->mdio_bus);
1033 if (i) {
1034 printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
1035 tp->dev->name, i);
1036 mdiobus_free(tp->mdio_bus);
1037 return i;
1040 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1042 if (!phydev || !phydev->drv) {
1043 printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
1044 mdiobus_unregister(tp->mdio_bus);
1045 mdiobus_free(tp->mdio_bus);
1046 return -ENODEV;
1049 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1050 case TG3_PHY_ID_BCM57780:
1051 phydev->interface = PHY_INTERFACE_MODE_GMII;
1052 break;
1053 case TG3_PHY_ID_BCM50610:
1054 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
1055 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1056 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1057 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1058 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1059 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1060 /* fallthru */
1061 case TG3_PHY_ID_RTL8211C:
1062 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1063 break;
1064 case TG3_PHY_ID_RTL8201E:
1065 case TG3_PHY_ID_BCMAC131:
1066 phydev->interface = PHY_INTERFACE_MODE_MII;
1067 break;
1070 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
1072 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1073 tg3_mdio_config_5785(tp);
1075 return 0;
1078 static void tg3_mdio_fini(struct tg3 *tp)
1080 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1081 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1082 mdiobus_unregister(tp->mdio_bus);
1083 mdiobus_free(tp->mdio_bus);
1084 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1088 /* tp->lock is held. */
1089 static inline void tg3_generate_fw_event(struct tg3 *tp)
1091 u32 val;
1093 val = tr32(GRC_RX_CPU_EVENT);
1094 val |= GRC_RX_CPU_DRIVER_EVENT;
1095 tw32_f(GRC_RX_CPU_EVENT, val);
1097 tp->last_event_jiffies = jiffies;
1100 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1102 /* tp->lock is held. */
1103 static void tg3_wait_for_event_ack(struct tg3 *tp)
1105 int i;
1106 unsigned int delay_cnt;
1107 long time_remain;
1109 /* If enough time has passed, no wait is necessary. */
1110 time_remain = (long)(tp->last_event_jiffies + 1 +
1111 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1112 (long)jiffies;
1113 if (time_remain < 0)
1114 return;
1116 /* Check if we can shorten the wait time. */
1117 delay_cnt = jiffies_to_usecs(time_remain);
1118 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1119 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1120 delay_cnt = (delay_cnt >> 3) + 1;
1122 for (i = 0; i < delay_cnt; i++) {
1123 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1124 break;
1125 udelay(8);
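/* Note: the firmware handshake has two halves.  tg3_generate_fw_event()
 * sets GRC_RX_CPU_DRIVER_EVENT and records the time; before the next
 * command, tg3_wait_for_event_ack() waits in 8 usec steps (bounded by
 * TG3_FW_EVENT_TIMEOUT_USEC) for the firmware to clear that bit, and the
 * wait is shortened by however much time has already elapsed.
 */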
1129 /* tp->lock is held. */
1130 static void tg3_ump_link_report(struct tg3 *tp)
1132 u32 reg;
1133 u32 val;
1135 if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1136 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1137 return;
1139 tg3_wait_for_event_ack(tp);
1141 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1143 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1145 val = 0;
1146 if (!tg3_readphy(tp, MII_BMCR, &reg))
1147 val = reg << 16;
1148 if (!tg3_readphy(tp, MII_BMSR, &reg))
1149 val |= (reg & 0xffff);
1150 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1152 val = 0;
1153 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1154 val = reg << 16;
1155 if (!tg3_readphy(tp, MII_LPA, &reg))
1156 val |= (reg & 0xffff);
1157 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1159 val = 0;
1160 if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
1161 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1162 val = reg << 16;
1163 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1164 val |= (reg & 0xffff);
1166 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1168 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1169 val = reg << 16;
1170 else
1171 val = 0;
1172 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1174 tg3_generate_fw_event(tp);
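/* Note: the link report above packs PHY state into the firmware command
 * data mailbox as four 32-bit words, each roughly of the form
 * "control register << 16 | status register": BMCR/BMSR, ADVERTISE/LPA,
 * CTRL1000/STAT1000 (skipped on MII serdes parts) and MII_PHYADDR in the
 * high half of the last word, before raising the driver event.
 */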
1177 static void tg3_link_report(struct tg3 *tp)
1179 if (!netif_carrier_ok(tp->dev)) {
1180 if (netif_msg_link(tp))
1181 printk(KERN_INFO PFX "%s: Link is down.\n",
1182 tp->dev->name);
1183 tg3_ump_link_report(tp);
1184 } else if (netif_msg_link(tp)) {
1185 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1186 tp->dev->name,
1187 (tp->link_config.active_speed == SPEED_1000 ?
1188 1000 :
1189 (tp->link_config.active_speed == SPEED_100 ?
1190 100 : 10)),
1191 (tp->link_config.active_duplex == DUPLEX_FULL ?
1192 "full" : "half"));
1194 printk(KERN_INFO PFX
1195 "%s: Flow control is %s for TX and %s for RX.\n",
1196 tp->dev->name,
1197 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1198 "on" : "off",
1199 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1200 "on" : "off");
1201 tg3_ump_link_report(tp);
1205 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1207 u16 miireg;
1209 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1210 miireg = ADVERTISE_PAUSE_CAP;
1211 else if (flow_ctrl & FLOW_CTRL_TX)
1212 miireg = ADVERTISE_PAUSE_ASYM;
1213 else if (flow_ctrl & FLOW_CTRL_RX)
1214 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1215 else
1216 miireg = 0;
1218 return miireg;
1221 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1223 u16 miireg;
1225 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1226 miireg = ADVERTISE_1000XPAUSE;
1227 else if (flow_ctrl & FLOW_CTRL_TX)
1228 miireg = ADVERTISE_1000XPSE_ASYM;
1229 else if (flow_ctrl & FLOW_CTRL_RX)
1230 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1231 else
1232 miireg = 0;
1234 return miireg;
1237 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1239 u8 cap = 0;
1241 if (lcladv & ADVERTISE_1000XPAUSE) {
1242 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1243 if (rmtadv & LPA_1000XPAUSE)
1244 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1245 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1246 cap = FLOW_CTRL_RX;
1247 } else {
1248 if (rmtadv & LPA_1000XPAUSE)
1249 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1251 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1252 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1253 cap = FLOW_CTRL_TX;
1256 return cap;
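/* Note: tg3_resolve_flowctrl_1000X() applies the usual 802.3x pause
 * resolution for 1000BASE-X: symmetric pause advertised by both ends
 * enables flow control in both directions, an asymmetric match enables
 * only the agreed direction, and anything else leaves pause disabled.
 */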
1259 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1261 u8 autoneg;
1262 u8 flowctrl = 0;
1263 u32 old_rx_mode = tp->rx_mode;
1264 u32 old_tx_mode = tp->tx_mode;
1266 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
1267 autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
1268 else
1269 autoneg = tp->link_config.autoneg;
1271 if (autoneg == AUTONEG_ENABLE &&
1272 (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1273 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1274 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1275 else
1276 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1277 } else
1278 flowctrl = tp->link_config.flowctrl;
1280 tp->link_config.active_flowctrl = flowctrl;
1282 if (flowctrl & FLOW_CTRL_RX)
1283 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1284 else
1285 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1287 if (old_rx_mode != tp->rx_mode)
1288 tw32_f(MAC_RX_MODE, tp->rx_mode);
1290 if (flowctrl & FLOW_CTRL_TX)
1291 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1292 else
1293 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1295 if (old_tx_mode != tp->tx_mode)
1296 tw32_f(MAC_TX_MODE, tp->tx_mode);
1299 static void tg3_adjust_link(struct net_device *dev)
1301 u8 oldflowctrl, linkmesg = 0;
1302 u32 mac_mode, lcl_adv, rmt_adv;
1303 struct tg3 *tp = netdev_priv(dev);
1304 struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1306 spin_lock(&tp->lock);
1308 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1309 MAC_MODE_HALF_DUPLEX);
1311 oldflowctrl = tp->link_config.active_flowctrl;
1313 if (phydev->link) {
1314 lcl_adv = 0;
1315 rmt_adv = 0;
1317 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1318 mac_mode |= MAC_MODE_PORT_MODE_MII;
1319 else
1320 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1322 if (phydev->duplex == DUPLEX_HALF)
1323 mac_mode |= MAC_MODE_HALF_DUPLEX;
1324 else {
1325 lcl_adv = tg3_advert_flowctrl_1000T(
1326 tp->link_config.flowctrl);
1328 if (phydev->pause)
1329 rmt_adv = LPA_PAUSE_CAP;
1330 if (phydev->asym_pause)
1331 rmt_adv |= LPA_PAUSE_ASYM;
1334 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1335 } else
1336 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1338 if (mac_mode != tp->mac_mode) {
1339 tp->mac_mode = mac_mode;
1340 tw32_f(MAC_MODE, tp->mac_mode);
1341 udelay(40);
1344 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1345 if (phydev->speed == SPEED_10)
1346 tw32(MAC_MI_STAT,
1347 MAC_MI_STAT_10MBPS_MODE |
1348 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1349 else
1350 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1353 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1354 tw32(MAC_TX_LENGTHS,
1355 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1356 (6 << TX_LENGTHS_IPG_SHIFT) |
1357 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1358 else
1359 tw32(MAC_TX_LENGTHS,
1360 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1361 (6 << TX_LENGTHS_IPG_SHIFT) |
1362 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1364 if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1365 (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1366 phydev->speed != tp->link_config.active_speed ||
1367 phydev->duplex != tp->link_config.active_duplex ||
1368 oldflowctrl != tp->link_config.active_flowctrl)
1369 linkmesg = 1;
1371 tp->link_config.active_speed = phydev->speed;
1372 tp->link_config.active_duplex = phydev->duplex;
1374 spin_unlock(&tp->lock);
1376 if (linkmesg)
1377 tg3_link_report(tp);
1380 static int tg3_phy_init(struct tg3 *tp)
1382 struct phy_device *phydev;
1384 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
1385 return 0;
1387 /* Bring the PHY back to a known state. */
1388 tg3_bmcr_reset(tp);
1390 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1392 /* Attach the MAC to the PHY. */
1393 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1394 phydev->dev_flags, phydev->interface);
1395 if (IS_ERR(phydev)) {
1396 printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
1397 return PTR_ERR(phydev);
1400 /* Mask with MAC supported features. */
1401 switch (phydev->interface) {
1402 case PHY_INTERFACE_MODE_GMII:
1403 case PHY_INTERFACE_MODE_RGMII:
1404 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1405 phydev->supported &= (PHY_GBIT_FEATURES |
1406 SUPPORTED_Pause |
1407 SUPPORTED_Asym_Pause);
1408 break;
1410 /* fallthru */
1411 case PHY_INTERFACE_MODE_MII:
1412 phydev->supported &= (PHY_BASIC_FEATURES |
1413 SUPPORTED_Pause |
1414 SUPPORTED_Asym_Pause);
1415 break;
1416 default:
1417 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
1418 return -EINVAL;
1421 tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;
1423 phydev->advertising = phydev->supported;
1425 return 0;
1428 static void tg3_phy_start(struct tg3 *tp)
1430 struct phy_device *phydev;
1432 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1433 return;
1435 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1437 if (tp->link_config.phy_is_low_power) {
1438 tp->link_config.phy_is_low_power = 0;
1439 phydev->speed = tp->link_config.orig_speed;
1440 phydev->duplex = tp->link_config.orig_duplex;
1441 phydev->autoneg = tp->link_config.orig_autoneg;
1442 phydev->advertising = tp->link_config.orig_advertising;
1445 phy_start(phydev);
1447 phy_start_aneg(phydev);
1450 static void tg3_phy_stop(struct tg3 *tp)
1452 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1453 return;
1455 phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
1458 static void tg3_phy_fini(struct tg3 *tp)
1460 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1461 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
1462 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1466 static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1468 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1469 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1472 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1474 u32 reg;
1476 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
1477 return;
1479 reg = MII_TG3_MISC_SHDW_WREN |
1480 MII_TG3_MISC_SHDW_SCR5_SEL |
1481 MII_TG3_MISC_SHDW_SCR5_LPED |
1482 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1483 MII_TG3_MISC_SHDW_SCR5_SDTL |
1484 MII_TG3_MISC_SHDW_SCR5_C125OE;
1485 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1486 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1488 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1491 reg = MII_TG3_MISC_SHDW_WREN |
1492 MII_TG3_MISC_SHDW_APD_SEL |
1493 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1494 if (enable)
1495 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1497 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1500 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1502 u32 phy;
1504 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1505 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1506 return;
1508 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1509 u32 ephy;
1511 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
1512 tg3_writephy(tp, MII_TG3_EPHY_TEST,
1513 ephy | MII_TG3_EPHY_SHADOW_EN);
1514 if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
1515 if (enable)
1516 phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
1517 else
1518 phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
1519 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
1521 tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
1523 } else {
1524 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1525 MII_TG3_AUXCTL_SHDWSEL_MISC;
1526 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1527 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1528 if (enable)
1529 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1530 else
1531 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1532 phy |= MII_TG3_AUXCTL_MISC_WREN;
1533 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1538 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1540 u32 val;
1542 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
1543 return;
1545 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1546 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1547 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1548 (val | (1 << 15) | (1 << 4)));
1551 static void tg3_phy_apply_otp(struct tg3 *tp)
1553 u32 otp, phy;
1555 if (!tp->phy_otp)
1556 return;
1558 otp = tp->phy_otp;
1560 /* Enable SM_DSP clock and tx 6dB coding. */
1561 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1562 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1563 MII_TG3_AUXCTL_ACTL_TX_6DB;
1564 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1566 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1567 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1568 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1570 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1571 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1572 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1574 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1575 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1576 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1578 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1579 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1581 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1582 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1584 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1585 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1586 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1588 /* Turn off SM_DSP clock. */
1589 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1590 MII_TG3_AUXCTL_ACTL_TX_6DB;
1591 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
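/* Note: tg3_phy_apply_otp() unpacks the one-time-programmable word in
 * tp->phy_otp field by field (AGC target, HPF, LPF, VDAC, 10BT amplitude
 * and resistor offsets, going by the TG3_OTP_* mask names) and writes
 * each value into the matching PHY DSP tap via tg3_phydsp_write(), with
 * the SM_DSP clock enabled only for the duration of the update.
 */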
1594 static int tg3_wait_macro_done(struct tg3 *tp)
1596 int limit = 100;
1598 while (limit--) {
1599 u32 tmp32;
1601 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1602 if ((tmp32 & 0x1000) == 0)
1603 break;
1606 if (limit <= 0)
1607 return -EBUSY;
1609 return 0;
1612 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1614 static const u32 test_pat[4][6] = {
1615 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1616 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1617 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1618 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1620 int chan;
1622 for (chan = 0; chan < 4; chan++) {
1623 int i;
1625 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1626 (chan * 0x2000) | 0x0200);
1627 tg3_writephy(tp, 0x16, 0x0002);
1629 for (i = 0; i < 6; i++)
1630 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1631 test_pat[chan][i]);
1633 tg3_writephy(tp, 0x16, 0x0202);
1634 if (tg3_wait_macro_done(tp)) {
1635 *resetp = 1;
1636 return -EBUSY;
1639 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1640 (chan * 0x2000) | 0x0200);
1641 tg3_writephy(tp, 0x16, 0x0082);
1642 if (tg3_wait_macro_done(tp)) {
1643 *resetp = 1;
1644 return -EBUSY;
1647 tg3_writephy(tp, 0x16, 0x0802);
1648 if (tg3_wait_macro_done(tp)) {
1649 *resetp = 1;
1650 return -EBUSY;
1653 for (i = 0; i < 6; i += 2) {
1654 u32 low, high;
1656 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1657 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1658 tg3_wait_macro_done(tp)) {
1659 *resetp = 1;
1660 return -EBUSY;
1662 low &= 0x7fff;
1663 high &= 0x000f;
1664 if (low != test_pat[chan][i] ||
1665 high != test_pat[chan][i+1]) {
1666 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1667 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1668 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1670 return -EBUSY;
1675 return 0;
1678 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1680 int chan;
1682 for (chan = 0; chan < 4; chan++) {
1683 int i;
1685 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1686 (chan * 0x2000) | 0x0200);
1687 tg3_writephy(tp, 0x16, 0x0002);
1688 for (i = 0; i < 6; i++)
1689 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1690 tg3_writephy(tp, 0x16, 0x0202);
1691 if (tg3_wait_macro_done(tp))
1692 return -EBUSY;
1695 return 0;
1698 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1700 u32 reg32, phy9_orig;
1701 int retries, do_phy_reset, err;
1703 retries = 10;
1704 do_phy_reset = 1;
1705 do {
1706 if (do_phy_reset) {
1707 err = tg3_bmcr_reset(tp);
1708 if (err)
1709 return err;
1710 do_phy_reset = 0;
1713 /* Disable transmitter and interrupt. */
1714 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1715 continue;
1717 reg32 |= 0x3000;
1718 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1720 /* Set full-duplex, 1000 mbps. */
1721 tg3_writephy(tp, MII_BMCR,
1722 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1724 /* Set to master mode. */
1725 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1726 continue;
1728 tg3_writephy(tp, MII_TG3_CTRL,
1729 (MII_TG3_CTRL_AS_MASTER |
1730 MII_TG3_CTRL_ENABLE_AS_MASTER));
1732 /* Enable SM_DSP_CLOCK and 6dB. */
1733 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1735 /* Block the PHY control access. */
1736 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1737 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1739 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1740 if (!err)
1741 break;
1742 } while (--retries);
1744 err = tg3_phy_reset_chanpat(tp);
1745 if (err)
1746 return err;
1748 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1749 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1751 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1752 tg3_writephy(tp, 0x16, 0x0000);
1754 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1755 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1756 /* Set Extended packet length bit for jumbo frames */
1757 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1759 else {
1760 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1763 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1765 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1766 reg32 &= ~0x3000;
1767 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1768 } else if (!err)
1769 err = -EBUSY;
1771 return err;
1774 /* This will reset the tigon3 PHY if there is no valid
1775 * link unless the FORCE argument is non-zero.
1777 static int tg3_phy_reset(struct tg3 *tp)
1779 u32 cpmuctrl;
1780 u32 phy_status;
1781 int err;
1783 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1784 u32 val;
1786 val = tr32(GRC_MISC_CFG);
1787 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1788 udelay(40);
1790 err = tg3_readphy(tp, MII_BMSR, &phy_status);
1791 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1792 if (err != 0)
1793 return -EBUSY;
1795 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1796 netif_carrier_off(tp->dev);
1797 tg3_link_report(tp);
1800 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1801 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1802 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1803 err = tg3_phy_reset_5703_4_5(tp);
1804 if (err)
1805 return err;
1806 goto out;
1809 cpmuctrl = 0;
1810 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1811 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1812 cpmuctrl = tr32(TG3_CPMU_CTRL);
1813 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1814 tw32(TG3_CPMU_CTRL,
1815 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1818 err = tg3_bmcr_reset(tp);
1819 if (err)
1820 return err;
1822 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1823 u32 phy;
1825 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1826 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1828 tw32(TG3_CPMU_CTRL, cpmuctrl);
1831 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
1832 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
1833 u32 val;
1835 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1836 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1837 CPMU_LSPD_1000MB_MACCLK_12_5) {
1838 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1839 udelay(40);
1840 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1844 tg3_phy_apply_otp(tp);
1846 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
1847 tg3_phy_toggle_apd(tp, true);
1848 else
1849 tg3_phy_toggle_apd(tp, false);
1851 out:
1852 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1853 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1854 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1855 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1856 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1857 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1858 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1860 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1861 tg3_writephy(tp, 0x1c, 0x8d68);
1862 tg3_writephy(tp, 0x1c, 0x8d68);
1864 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1865 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1866 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1867 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1868 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1869 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1870 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1871 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1872 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1874 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1875 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1876 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1877 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1878 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1879 tg3_writephy(tp, MII_TG3_TEST1,
1880 MII_TG3_TEST1_TRIM_EN | 0x4);
1881 } else
1882 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1883 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1885 /* Set Extended packet length bit (bit 14) on all chips that */
1886 /* support jumbo frames */
1887 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1888 /* Cannot do read-modify-write on 5401 */
1889 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1890 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1891 u32 phy_reg;
1893 /* Set bit 14 with read-modify-write to preserve other bits */
1894 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1895 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1896 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1899 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1900 * jumbo frames transmission.
1902 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1903 u32 phy_reg;
1905 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1906 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1907 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1910 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1911 /* adjust output voltage */
1912 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
1915 tg3_phy_toggle_automdix(tp, 1);
1916 tg3_phy_set_wirespeed(tp);
1917 return 0;
1920 static void tg3_frob_aux_power(struct tg3 *tp)
1922 struct tg3 *tp_peer = tp;
1924 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1925 return;
1927 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1928 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1929 struct net_device *dev_peer;
1931 dev_peer = pci_get_drvdata(tp->pdev_peer);
1932 /* remove_one() may have been run on the peer. */
1933 if (!dev_peer)
1934 tp_peer = tp;
1935 else
1936 tp_peer = netdev_priv(dev_peer);
1939 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1940 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1941 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1942 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1943 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1944 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1945 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1946 (GRC_LCLCTRL_GPIO_OE0 |
1947 GRC_LCLCTRL_GPIO_OE1 |
1948 GRC_LCLCTRL_GPIO_OE2 |
1949 GRC_LCLCTRL_GPIO_OUTPUT0 |
1950 GRC_LCLCTRL_GPIO_OUTPUT1),
1951 100);
1952 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
1953 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
1954 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
1955 GRC_LCLCTRL_GPIO_OE1 |
1956 GRC_LCLCTRL_GPIO_OE2 |
1957 GRC_LCLCTRL_GPIO_OUTPUT0 |
1958 GRC_LCLCTRL_GPIO_OUTPUT1 |
1959 tp->grc_local_ctrl;
1960 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1962 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
1963 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1965 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
1966 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1967 } else {
1968 u32 no_gpio2;
1969 u32 grc_local_ctrl = 0;
1971 if (tp_peer != tp &&
1972 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1973 return;
1975 /* Workaround to prevent overdrawing Amps. */
1976 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1977 ASIC_REV_5714) {
1978 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1979 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1980 grc_local_ctrl, 100);
1983 /* On 5753 and variants, GPIO2 cannot be used. */
1984 no_gpio2 = tp->nic_sram_data_cfg &
1985 NIC_SRAM_DATA_CFG_NO_GPIO2;
1987 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1988 GRC_LCLCTRL_GPIO_OE1 |
1989 GRC_LCLCTRL_GPIO_OE2 |
1990 GRC_LCLCTRL_GPIO_OUTPUT1 |
1991 GRC_LCLCTRL_GPIO_OUTPUT2;
1992 if (no_gpio2) {
1993 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1994 GRC_LCLCTRL_GPIO_OUTPUT2);
1996 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1997 grc_local_ctrl, 100);
1999 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2001 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2002 grc_local_ctrl, 100);
2004 if (!no_gpio2) {
2005 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2006 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2007 grc_local_ctrl, 100);
2010 } else {
2011 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2012 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2013 if (tp_peer != tp &&
2014 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2015 return;
2017 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2018 (GRC_LCLCTRL_GPIO_OE1 |
2019 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2021 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2022 GRC_LCLCTRL_GPIO_OE1, 100);
2024 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2025 (GRC_LCLCTRL_GPIO_OE1 |
2026 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
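/* Decide whether MAC_MODE_LINK_POLARITY should be set for the given
 * speed on 5700-class boards; the BCM5411 PHY and the PHY_2 LED mode
 * invert the usual "set it at 10 Mbps" rule.
 */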
2031 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2033 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2034 return 1;
2035 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
2036 if (speed != SPEED_10)
2037 return 1;
2038 } else if (speed == SPEED_10)
2039 return 1;
2041 return 0;
2044 static int tg3_setup_phy(struct tg3 *, int);
2046 #define RESET_KIND_SHUTDOWN 0
2047 #define RESET_KIND_INIT 1
2048 #define RESET_KIND_SUSPEND 2
2050 static void tg3_write_sig_post_reset(struct tg3 *, int);
2051 static int tg3_halt_cpu(struct tg3 *, u32);
2052 static int tg3_nvram_lock(struct tg3 *);
2053 static void tg3_nvram_unlock(struct tg3 *);
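/* Put the PHY (or SerDes block) into its lowest power state before
 * suspend.  Some chip revisions must not have the PHY powered down at
 * all, the 5906 uses EPHY IDDQ mode instead of BMCR_PDOWN, and the
 * 5784/5761 AX parts need a MAC clock fix-up first.
 */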
2055 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2057 u32 val;
2059 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2060 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2061 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2062 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2064 sg_dig_ctrl |=
2065 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2066 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2067 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2069 return;
2072 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2073 tg3_bmcr_reset(tp);
2074 val = tr32(GRC_MISC_CFG);
2075 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2076 udelay(40);
2077 return;
2078 } else if (do_low_power) {
2079 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2080 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2082 tg3_writephy(tp, MII_TG3_AUX_CTRL,
2083 MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2084 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2085 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2086 MII_TG3_AUXCTL_PCTL_VREG_11V);
2089 /* The PHY should not be powered down on some chips because
2090 * of bugs.
2091 */
2092 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2093 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2094 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2095 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2096 return;
2098 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2099 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2100 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2101 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2102 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2103 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2106 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2109 /* tp->lock is held. */
2110 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2112 u32 addr_high, addr_low;
2113 int i;
2115 addr_high = ((tp->dev->dev_addr[0] << 8) |
2116 tp->dev->dev_addr[1]);
2117 addr_low = ((tp->dev->dev_addr[2] << 24) |
2118 (tp->dev->dev_addr[3] << 16) |
2119 (tp->dev->dev_addr[4] << 8) |
2120 (tp->dev->dev_addr[5] << 0));
2121 for (i = 0; i < 4; i++) {
2122 if (i == 1 && skip_mac_1)
2123 continue;
2124 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2125 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2128 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2129 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2130 for (i = 0; i < 12; i++) {
2131 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2132 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2136 addr_high = (tp->dev->dev_addr[0] +
2137 tp->dev->dev_addr[1] +
2138 tp->dev->dev_addr[2] +
2139 tp->dev->dev_addr[3] +
2140 tp->dev->dev_addr[4] +
2141 tp->dev->dev_addr[5]) &
2142 TX_BACKOFF_SEED_MASK;
2143 tw32(MAC_TX_BACKOFF_SEED, addr_high);
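/* Move the device to the requested PCI power state.  For the D1-D3hot
 * cases this sets up Wake-on-LAN, the low-power MAC and clock
 * configuration, powers down the PHY when neither wake-up nor ASF
 * needs it, and finally hands the device to the PCI core.
 */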
2146 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2148 u32 misc_host_ctrl;
2149 bool device_should_wake, do_low_power;
2151 /* Make sure register accesses (indirect or otherwise)
2152 * will function correctly.
2153 */
2154 pci_write_config_dword(tp->pdev,
2155 TG3PCI_MISC_HOST_CTRL,
2156 tp->misc_host_ctrl);
2158 switch (state) {
2159 case PCI_D0:
2160 pci_enable_wake(tp->pdev, state, false);
2161 pci_set_power_state(tp->pdev, PCI_D0);
2163 /* Switch out of Vaux if it is a NIC */
2164 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2165 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2167 return 0;
2169 case PCI_D1:
2170 case PCI_D2:
2171 case PCI_D3hot:
2172 break;
2174 default:
2175 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2176 tp->dev->name, state);
2177 return -EINVAL;
2180 /* Restore the CLKREQ setting. */
2181 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2182 u16 lnkctl;
2184 pci_read_config_word(tp->pdev,
2185 tp->pcie_cap + PCI_EXP_LNKCTL,
2186 &lnkctl);
2187 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2188 pci_write_config_word(tp->pdev,
2189 tp->pcie_cap + PCI_EXP_LNKCTL,
2190 lnkctl);
2193 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2194 tw32(TG3PCI_MISC_HOST_CTRL,
2195 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2197 device_should_wake = pci_pme_capable(tp->pdev, state) &&
2198 device_may_wakeup(&tp->pdev->dev) &&
2199 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2201 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2202 do_low_power = false;
2203 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2204 !tp->link_config.phy_is_low_power) {
2205 struct phy_device *phydev;
2206 u32 phyid, advertising;
2208 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
2210 tp->link_config.phy_is_low_power = 1;
2212 tp->link_config.orig_speed = phydev->speed;
2213 tp->link_config.orig_duplex = phydev->duplex;
2214 tp->link_config.orig_autoneg = phydev->autoneg;
2215 tp->link_config.orig_advertising = phydev->advertising;
2217 advertising = ADVERTISED_TP |
2218 ADVERTISED_Pause |
2219 ADVERTISED_Autoneg |
2220 ADVERTISED_10baseT_Half;
2222 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2223 device_should_wake) {
2224 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2225 advertising |=
2226 ADVERTISED_100baseT_Half |
2227 ADVERTISED_100baseT_Full |
2228 ADVERTISED_10baseT_Full;
2229 else
2230 advertising |= ADVERTISED_10baseT_Full;
2233 phydev->advertising = advertising;
2235 phy_start_aneg(phydev);
2237 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2238 if (phyid != TG3_PHY_ID_BCMAC131) {
2239 phyid &= TG3_PHY_OUI_MASK;
2240 if (phyid == TG3_PHY_OUI_1 ||
2241 phyid == TG3_PHY_OUI_2 ||
2242 phyid == TG3_PHY_OUI_3)
2243 do_low_power = true;
2246 } else {
2247 do_low_power = true;
2249 if (tp->link_config.phy_is_low_power == 0) {
2250 tp->link_config.phy_is_low_power = 1;
2251 tp->link_config.orig_speed = tp->link_config.speed;
2252 tp->link_config.orig_duplex = tp->link_config.duplex;
2253 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2256 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2257 tp->link_config.speed = SPEED_10;
2258 tp->link_config.duplex = DUPLEX_HALF;
2259 tp->link_config.autoneg = AUTONEG_ENABLE;
2260 tg3_setup_phy(tp, 0);
2264 __tg3_set_mac_addr(tp, 0);
2266 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2267 u32 val;
2269 val = tr32(GRC_VCPU_EXT_CTRL);
2270 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2271 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2272 int i;
2273 u32 val;
2275 for (i = 0; i < 200; i++) {
2276 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2277 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2278 break;
2279 msleep(1);
2282 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2283 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2284 WOL_DRV_STATE_SHUTDOWN |
2285 WOL_DRV_WOL |
2286 WOL_SET_MAGIC_PKT);
2288 if (device_should_wake) {
2289 u32 mac_mode;
2291 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2292 if (do_low_power) {
2293 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2294 udelay(40);
2297 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2298 mac_mode = MAC_MODE_PORT_MODE_GMII;
2299 else
2300 mac_mode = MAC_MODE_PORT_MODE_MII;
2302 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2303 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2304 ASIC_REV_5700) {
2305 u32 speed = (tp->tg3_flags &
2306 TG3_FLAG_WOL_SPEED_100MB) ?
2307 SPEED_100 : SPEED_10;
2308 if (tg3_5700_link_polarity(tp, speed))
2309 mac_mode |= MAC_MODE_LINK_POLARITY;
2310 else
2311 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2313 } else {
2314 mac_mode = MAC_MODE_PORT_MODE_TBI;
2317 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2318 tw32(MAC_LED_CTRL, tp->led_ctrl);
2320 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2321 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2322 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2323 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2324 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2325 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2327 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2328 mac_mode |= tp->mac_mode &
2329 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2330 if (mac_mode & MAC_MODE_APE_TX_EN)
2331 mac_mode |= MAC_MODE_TDE_ENABLE;
2334 tw32_f(MAC_MODE, mac_mode);
2335 udelay(100);
2337 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2338 udelay(10);
2341 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2342 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2343 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2344 u32 base_val;
2346 base_val = tp->pci_clock_ctrl;
2347 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2348 CLOCK_CTRL_TXCLK_DISABLE);
2350 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2351 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2352 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2353 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2354 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2355 /* do nothing */
2356 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2357 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2358 u32 newbits1, newbits2;
2360 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2361 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2362 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2363 CLOCK_CTRL_TXCLK_DISABLE |
2364 CLOCK_CTRL_ALTCLK);
2365 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2366 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2367 newbits1 = CLOCK_CTRL_625_CORE;
2368 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2369 } else {
2370 newbits1 = CLOCK_CTRL_ALTCLK;
2371 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2374 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2375 40);
2377 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2378 40);
2380 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2381 u32 newbits3;
2383 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2384 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2385 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2386 CLOCK_CTRL_TXCLK_DISABLE |
2387 CLOCK_CTRL_44MHZ_CORE);
2388 } else {
2389 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2392 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2393 tp->pci_clock_ctrl | newbits3, 40);
2397 if (!(device_should_wake) &&
2398 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2399 tg3_power_down_phy(tp, do_low_power);
2401 tg3_frob_aux_power(tp);
2403 /* Workaround for unstable PLL clock */
2404 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2405 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2406 u32 val = tr32(0x7d00);
2408 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2409 tw32(0x7d00, val);
2410 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2411 int err;
2413 err = tg3_nvram_lock(tp);
2414 tg3_halt_cpu(tp, RX_CPU_BASE);
2415 if (!err)
2416 tg3_nvram_unlock(tp);
2420 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2422 if (device_should_wake)
2423 pci_enable_wake(tp->pdev, state, true);
2425 /* Finally, set the new power state. */
2426 pci_set_power_state(tp->pdev, state);
2428 return 0;
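/* Translate the PHY AUX status register into a speed/duplex pair.  An
 * unrecognised encoding yields SPEED_INVALID/DUPLEX_INVALID, except on
 * the 5906 which reports the result in separate bits.
 */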
2431 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2433 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2434 case MII_TG3_AUX_STAT_10HALF:
2435 *speed = SPEED_10;
2436 *duplex = DUPLEX_HALF;
2437 break;
2439 case MII_TG3_AUX_STAT_10FULL:
2440 *speed = SPEED_10;
2441 *duplex = DUPLEX_FULL;
2442 break;
2444 case MII_TG3_AUX_STAT_100HALF:
2445 *speed = SPEED_100;
2446 *duplex = DUPLEX_HALF;
2447 break;
2449 case MII_TG3_AUX_STAT_100FULL:
2450 *speed = SPEED_100;
2451 *duplex = DUPLEX_FULL;
2452 break;
2454 case MII_TG3_AUX_STAT_1000HALF:
2455 *speed = SPEED_1000;
2456 *duplex = DUPLEX_HALF;
2457 break;
2459 case MII_TG3_AUX_STAT_1000FULL:
2460 *speed = SPEED_1000;
2461 *duplex = DUPLEX_FULL;
2462 break;
2464 default:
2465 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2466 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2467 SPEED_10;
2468 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2469 DUPLEX_HALF;
2470 break;
2472 *speed = SPEED_INVALID;
2473 *duplex = DUPLEX_INVALID;
2474 break;
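/* Program the copper PHY advertisement and control registers from
 * tp->link_config: a reduced 10/100 advertisement when entering low
 * power, the full autoneg advertisement otherwise, or a forced
 * speed/duplex when autonegotiation is disabled.
 */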
2478 static void tg3_phy_copper_begin(struct tg3 *tp)
2480 u32 new_adv;
2481 int i;
2483 if (tp->link_config.phy_is_low_power) {
2484 /* Entering low power mode. Disable gigabit and
2485 * 100baseT advertisements.
2486 */
2487 tg3_writephy(tp, MII_TG3_CTRL, 0);
2489 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2490 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2491 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2492 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2494 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2495 } else if (tp->link_config.speed == SPEED_INVALID) {
2496 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2497 tp->link_config.advertising &=
2498 ~(ADVERTISED_1000baseT_Half |
2499 ADVERTISED_1000baseT_Full);
2501 new_adv = ADVERTISE_CSMA;
2502 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2503 new_adv |= ADVERTISE_10HALF;
2504 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2505 new_adv |= ADVERTISE_10FULL;
2506 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2507 new_adv |= ADVERTISE_100HALF;
2508 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2509 new_adv |= ADVERTISE_100FULL;
2511 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2513 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2515 if (tp->link_config.advertising &
2516 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2517 new_adv = 0;
2518 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2519 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2520 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2521 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2522 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2523 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2524 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2525 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2526 MII_TG3_CTRL_ENABLE_AS_MASTER);
2527 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2528 } else {
2529 tg3_writephy(tp, MII_TG3_CTRL, 0);
2531 } else {
2532 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2533 new_adv |= ADVERTISE_CSMA;
2535 /* Asking for a specific link mode. */
2536 if (tp->link_config.speed == SPEED_1000) {
2537 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2539 if (tp->link_config.duplex == DUPLEX_FULL)
2540 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2541 else
2542 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2543 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2544 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2545 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2546 MII_TG3_CTRL_ENABLE_AS_MASTER);
2547 } else {
2548 if (tp->link_config.speed == SPEED_100) {
2549 if (tp->link_config.duplex == DUPLEX_FULL)
2550 new_adv |= ADVERTISE_100FULL;
2551 else
2552 new_adv |= ADVERTISE_100HALF;
2553 } else {
2554 if (tp->link_config.duplex == DUPLEX_FULL)
2555 new_adv |= ADVERTISE_10FULL;
2556 else
2557 new_adv |= ADVERTISE_10HALF;
2559 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2561 new_adv = 0;
2564 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2567 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2568 tp->link_config.speed != SPEED_INVALID) {
2569 u32 bmcr, orig_bmcr;
2571 tp->link_config.active_speed = tp->link_config.speed;
2572 tp->link_config.active_duplex = tp->link_config.duplex;
2574 bmcr = 0;
2575 switch (tp->link_config.speed) {
2576 default:
2577 case SPEED_10:
2578 break;
2580 case SPEED_100:
2581 bmcr |= BMCR_SPEED100;
2582 break;
2584 case SPEED_1000:
2585 bmcr |= TG3_BMCR_SPEED1000;
2586 break;
2589 if (tp->link_config.duplex == DUPLEX_FULL)
2590 bmcr |= BMCR_FULLDPLX;
2592 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2593 (bmcr != orig_bmcr)) {
2594 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2595 for (i = 0; i < 1500; i++) {
2596 u32 tmp;
2598 udelay(10);
2599 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2600 tg3_readphy(tp, MII_BMSR, &tmp))
2601 continue;
2602 if (!(tmp & BMSR_LSTATUS)) {
2603 udelay(40);
2604 break;
2607 tg3_writephy(tp, MII_BMCR, bmcr);
2608 udelay(40);
2610 } else {
2611 tg3_writephy(tp, MII_BMCR,
2612 BMCR_ANENABLE | BMCR_ANRESTART);
2616 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2618 int err;
2620 /* Turn off tap power management. */
2621 /* Set Extended packet length bit */
2622 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2624 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2625 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2627 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2628 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2630 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2631 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2633 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2634 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2636 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2637 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2639 udelay(40);
2641 return err;
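/* Return 1 if the PHY advertisement registers already contain every
 * link mode requested in @mask, 0 otherwise or on a read error.
 */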
2644 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2646 u32 adv_reg, all_mask = 0;
2648 if (mask & ADVERTISED_10baseT_Half)
2649 all_mask |= ADVERTISE_10HALF;
2650 if (mask & ADVERTISED_10baseT_Full)
2651 all_mask |= ADVERTISE_10FULL;
2652 if (mask & ADVERTISED_100baseT_Half)
2653 all_mask |= ADVERTISE_100HALF;
2654 if (mask & ADVERTISED_100baseT_Full)
2655 all_mask |= ADVERTISE_100FULL;
2657 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2658 return 0;
2660 if ((adv_reg & all_mask) != all_mask)
2661 return 0;
2662 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2663 u32 tg3_ctrl;
2665 all_mask = 0;
2666 if (mask & ADVERTISED_1000baseT_Half)
2667 all_mask |= ADVERTISE_1000HALF;
2668 if (mask & ADVERTISED_1000baseT_Full)
2669 all_mask |= ADVERTISE_1000FULL;
2671 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2672 return 0;
2674 if ((tg3_ctrl & all_mask) != all_mask)
2675 return 0;
2677 return 1;
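/* Check that the advertised pause bits match the flow control
 * requested in tp->link_config.  A mismatch on a full-duplex link is
 * reported as a failure; otherwise the advertisement is quietly
 * rewritten so a later renegotiation picks up the right values.
 */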
2680 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2682 u32 curadv, reqadv;
2684 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2685 return 1;
2687 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2688 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2690 if (tp->link_config.active_duplex == DUPLEX_FULL) {
2691 if (curadv != reqadv)
2692 return 0;
2694 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2695 tg3_readphy(tp, MII_LPA, rmtadv);
2696 } else {
2697 /* Reprogram the advertisement register, even if it
2698 * does not affect the current link. If the link
2699 * gets renegotiated in the future, we can save an
2700 * additional renegotiation cycle by advertising
2701 * it correctly in the first place.
2702 */
2703 if (curadv != reqadv) {
2704 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2705 ADVERTISE_PAUSE_ASYM);
2706 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2710 return 1;
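/* Main link setup path for copper PHYs: apply chip-specific PHY
 * workarounds, read back the negotiated speed and duplex, program the
 * MAC mode and flow control to match, and report carrier changes.
 */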
2713 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2715 int current_link_up;
2716 u32 bmsr, dummy;
2717 u32 lcl_adv, rmt_adv;
2718 u16 current_speed;
2719 u8 current_duplex;
2720 int i, err;
2722 tw32(MAC_EVENT, 0);
2724 tw32_f(MAC_STATUS,
2725 (MAC_STATUS_SYNC_CHANGED |
2726 MAC_STATUS_CFG_CHANGED |
2727 MAC_STATUS_MI_COMPLETION |
2728 MAC_STATUS_LNKSTATE_CHANGED));
2729 udelay(40);
2731 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2732 tw32_f(MAC_MI_MODE,
2733 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2734 udelay(80);
2737 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2739 /* Some third-party PHYs need to be reset on link going
2740 * down.
2741 */
2742 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2743 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2744 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2745 netif_carrier_ok(tp->dev)) {
2746 tg3_readphy(tp, MII_BMSR, &bmsr);
2747 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2748 !(bmsr & BMSR_LSTATUS))
2749 force_reset = 1;
2751 if (force_reset)
2752 tg3_phy_reset(tp);
2754 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2755 tg3_readphy(tp, MII_BMSR, &bmsr);
2756 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2757 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2758 bmsr = 0;
2760 if (!(bmsr & BMSR_LSTATUS)) {
2761 err = tg3_init_5401phy_dsp(tp);
2762 if (err)
2763 return err;
2765 tg3_readphy(tp, MII_BMSR, &bmsr);
2766 for (i = 0; i < 1000; i++) {
2767 udelay(10);
2768 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2769 (bmsr & BMSR_LSTATUS)) {
2770 udelay(40);
2771 break;
2775 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2776 !(bmsr & BMSR_LSTATUS) &&
2777 tp->link_config.active_speed == SPEED_1000) {
2778 err = tg3_phy_reset(tp);
2779 if (!err)
2780 err = tg3_init_5401phy_dsp(tp);
2781 if (err)
2782 return err;
2785 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2786 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2787 /* 5701 {A0,B0} CRC bug workaround */
2788 tg3_writephy(tp, 0x15, 0x0a75);
2789 tg3_writephy(tp, 0x1c, 0x8c68);
2790 tg3_writephy(tp, 0x1c, 0x8d68);
2791 tg3_writephy(tp, 0x1c, 0x8c68);
2794 /* Clear pending interrupts... */
2795 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2796 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2798 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2799 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
2800 else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
2801 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2803 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2804 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2805 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2806 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2807 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2808 else
2809 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2812 current_link_up = 0;
2813 current_speed = SPEED_INVALID;
2814 current_duplex = DUPLEX_INVALID;
2816 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2817 u32 val;
2819 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2820 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2821 if (!(val & (1 << 10))) {
2822 val |= (1 << 10);
2823 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2824 goto relink;
2828 bmsr = 0;
2829 for (i = 0; i < 100; i++) {
2830 tg3_readphy(tp, MII_BMSR, &bmsr);
2831 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2832 (bmsr & BMSR_LSTATUS))
2833 break;
2834 udelay(40);
2837 if (bmsr & BMSR_LSTATUS) {
2838 u32 aux_stat, bmcr;
2840 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2841 for (i = 0; i < 2000; i++) {
2842 udelay(10);
2843 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2844 aux_stat)
2845 break;
2848 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2849 &current_speed,
2850 &current_duplex);
2852 bmcr = 0;
2853 for (i = 0; i < 200; i++) {
2854 tg3_readphy(tp, MII_BMCR, &bmcr);
2855 if (tg3_readphy(tp, MII_BMCR, &bmcr))
2856 continue;
2857 if (bmcr && bmcr != 0x7fff)
2858 break;
2859 udelay(10);
2862 lcl_adv = 0;
2863 rmt_adv = 0;
2865 tp->link_config.active_speed = current_speed;
2866 tp->link_config.active_duplex = current_duplex;
2868 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2869 if ((bmcr & BMCR_ANENABLE) &&
2870 tg3_copper_is_advertising_all(tp,
2871 tp->link_config.advertising)) {
2872 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2873 &rmt_adv))
2874 current_link_up = 1;
2876 } else {
2877 if (!(bmcr & BMCR_ANENABLE) &&
2878 tp->link_config.speed == current_speed &&
2879 tp->link_config.duplex == current_duplex &&
2880 tp->link_config.flowctrl ==
2881 tp->link_config.active_flowctrl) {
2882 current_link_up = 1;
2886 if (current_link_up == 1 &&
2887 tp->link_config.active_duplex == DUPLEX_FULL)
2888 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2891 relink:
2892 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
2893 u32 tmp;
2895 tg3_phy_copper_begin(tp);
2897 tg3_readphy(tp, MII_BMSR, &tmp);
2898 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2899 (tmp & BMSR_LSTATUS))
2900 current_link_up = 1;
2903 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2904 if (current_link_up == 1) {
2905 if (tp->link_config.active_speed == SPEED_100 ||
2906 tp->link_config.active_speed == SPEED_10)
2907 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2908 else
2909 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2910 } else
2911 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2913 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2914 if (tp->link_config.active_duplex == DUPLEX_HALF)
2915 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2917 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
2918 if (current_link_up == 1 &&
2919 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
2920 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2921 else
2922 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2925 /* ??? Without this setting Netgear GA302T PHY does not
2926 * ??? send/receive packets...
2927 */
2928 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2929 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2930 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2931 tw32_f(MAC_MI_MODE, tp->mi_mode);
2932 udelay(80);
2935 tw32_f(MAC_MODE, tp->mac_mode);
2936 udelay(40);
2938 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2939 /* Polled via timer. */
2940 tw32_f(MAC_EVENT, 0);
2941 } else {
2942 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2944 udelay(40);
2946 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2947 current_link_up == 1 &&
2948 tp->link_config.active_speed == SPEED_1000 &&
2949 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2950 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2951 udelay(120);
2952 tw32_f(MAC_STATUS,
2953 (MAC_STATUS_SYNC_CHANGED |
2954 MAC_STATUS_CFG_CHANGED));
2955 udelay(40);
2956 tg3_write_mem(tp,
2957 NIC_SRAM_FIRMWARE_MBOX,
2958 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2961 /* Prevent send BD corruption. */
2962 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2963 u16 oldlnkctl, newlnkctl;
2965 pci_read_config_word(tp->pdev,
2966 tp->pcie_cap + PCI_EXP_LNKCTL,
2967 &oldlnkctl);
2968 if (tp->link_config.active_speed == SPEED_100 ||
2969 tp->link_config.active_speed == SPEED_10)
2970 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
2971 else
2972 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
2973 if (newlnkctl != oldlnkctl)
2974 pci_write_config_word(tp->pdev,
2975 tp->pcie_cap + PCI_EXP_LNKCTL,
2976 newlnkctl);
2979 if (current_link_up != netif_carrier_ok(tp->dev)) {
2980 if (current_link_up)
2981 netif_carrier_on(tp->dev);
2982 else
2983 netif_carrier_off(tp->dev);
2984 tg3_link_report(tp);
2987 return 0;
2990 struct tg3_fiber_aneginfo {
2991 int state;
2992 #define ANEG_STATE_UNKNOWN 0
2993 #define ANEG_STATE_AN_ENABLE 1
2994 #define ANEG_STATE_RESTART_INIT 2
2995 #define ANEG_STATE_RESTART 3
2996 #define ANEG_STATE_DISABLE_LINK_OK 4
2997 #define ANEG_STATE_ABILITY_DETECT_INIT 5
2998 #define ANEG_STATE_ABILITY_DETECT 6
2999 #define ANEG_STATE_ACK_DETECT_INIT 7
3000 #define ANEG_STATE_ACK_DETECT 8
3001 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3002 #define ANEG_STATE_COMPLETE_ACK 10
3003 #define ANEG_STATE_IDLE_DETECT_INIT 11
3004 #define ANEG_STATE_IDLE_DETECT 12
3005 #define ANEG_STATE_LINK_OK 13
3006 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3007 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3009 u32 flags;
3010 #define MR_AN_ENABLE 0x00000001
3011 #define MR_RESTART_AN 0x00000002
3012 #define MR_AN_COMPLETE 0x00000004
3013 #define MR_PAGE_RX 0x00000008
3014 #define MR_NP_LOADED 0x00000010
3015 #define MR_TOGGLE_TX 0x00000020
3016 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3017 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3018 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3019 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3020 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3021 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3022 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3023 #define MR_TOGGLE_RX 0x00002000
3024 #define MR_NP_RX 0x00004000
3026 #define MR_LINK_OK 0x80000000
3028 unsigned long link_time, cur_time;
3030 u32 ability_match_cfg;
3031 int ability_match_count;
3033 char ability_match, idle_match, ack_match;
3035 u32 txconfig, rxconfig;
3036 #define ANEG_CFG_NP 0x00000080
3037 #define ANEG_CFG_ACK 0x00000040
3038 #define ANEG_CFG_RF2 0x00000020
3039 #define ANEG_CFG_RF1 0x00000010
3040 #define ANEG_CFG_PS2 0x00000001
3041 #define ANEG_CFG_PS1 0x00008000
3042 #define ANEG_CFG_HD 0x00004000
3043 #define ANEG_CFG_FD 0x00002000
3044 #define ANEG_CFG_INVAL 0x00001f06
3047 #define ANEG_OK 0
3048 #define ANEG_DONE 1
3049 #define ANEG_TIMER_ENAB 2
3050 #define ANEG_FAILED -1
3052 #define ANEG_STATE_SETTLE_TIME 10000
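/* One step of the software 1000BASE-X autonegotiation state machine.
 * fiber_autoneg() below runs this repeatedly, roughly once per
 * microsecond, until it returns ANEG_DONE or ANEG_FAILED.
 */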
3054 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3055 struct tg3_fiber_aneginfo *ap)
3057 u16 flowctrl;
3058 unsigned long delta;
3059 u32 rx_cfg_reg;
3060 int ret;
3062 if (ap->state == ANEG_STATE_UNKNOWN) {
3063 ap->rxconfig = 0;
3064 ap->link_time = 0;
3065 ap->cur_time = 0;
3066 ap->ability_match_cfg = 0;
3067 ap->ability_match_count = 0;
3068 ap->ability_match = 0;
3069 ap->idle_match = 0;
3070 ap->ack_match = 0;
3072 ap->cur_time++;
3074 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3075 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3077 if (rx_cfg_reg != ap->ability_match_cfg) {
3078 ap->ability_match_cfg = rx_cfg_reg;
3079 ap->ability_match = 0;
3080 ap->ability_match_count = 0;
3081 } else {
3082 if (++ap->ability_match_count > 1) {
3083 ap->ability_match = 1;
3084 ap->ability_match_cfg = rx_cfg_reg;
3087 if (rx_cfg_reg & ANEG_CFG_ACK)
3088 ap->ack_match = 1;
3089 else
3090 ap->ack_match = 0;
3092 ap->idle_match = 0;
3093 } else {
3094 ap->idle_match = 1;
3095 ap->ability_match_cfg = 0;
3096 ap->ability_match_count = 0;
3097 ap->ability_match = 0;
3098 ap->ack_match = 0;
3100 rx_cfg_reg = 0;
3103 ap->rxconfig = rx_cfg_reg;
3104 ret = ANEG_OK;
3106 switch(ap->state) {
3107 case ANEG_STATE_UNKNOWN:
3108 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3109 ap->state = ANEG_STATE_AN_ENABLE;
3111 /* fallthru */
3112 case ANEG_STATE_AN_ENABLE:
3113 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3114 if (ap->flags & MR_AN_ENABLE) {
3115 ap->link_time = 0;
3116 ap->cur_time = 0;
3117 ap->ability_match_cfg = 0;
3118 ap->ability_match_count = 0;
3119 ap->ability_match = 0;
3120 ap->idle_match = 0;
3121 ap->ack_match = 0;
3123 ap->state = ANEG_STATE_RESTART_INIT;
3124 } else {
3125 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3127 break;
3129 case ANEG_STATE_RESTART_INIT:
3130 ap->link_time = ap->cur_time;
3131 ap->flags &= ~(MR_NP_LOADED);
3132 ap->txconfig = 0;
3133 tw32(MAC_TX_AUTO_NEG, 0);
3134 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3135 tw32_f(MAC_MODE, tp->mac_mode);
3136 udelay(40);
3138 ret = ANEG_TIMER_ENAB;
3139 ap->state = ANEG_STATE_RESTART;
3141 /* fallthru */
3142 case ANEG_STATE_RESTART:
3143 delta = ap->cur_time - ap->link_time;
3144 if (delta > ANEG_STATE_SETTLE_TIME) {
3145 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3146 } else {
3147 ret = ANEG_TIMER_ENAB;
3149 break;
3151 case ANEG_STATE_DISABLE_LINK_OK:
3152 ret = ANEG_DONE;
3153 break;
3155 case ANEG_STATE_ABILITY_DETECT_INIT:
3156 ap->flags &= ~(MR_TOGGLE_TX);
3157 ap->txconfig = ANEG_CFG_FD;
3158 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3159 if (flowctrl & ADVERTISE_1000XPAUSE)
3160 ap->txconfig |= ANEG_CFG_PS1;
3161 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3162 ap->txconfig |= ANEG_CFG_PS2;
3163 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3164 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3165 tw32_f(MAC_MODE, tp->mac_mode);
3166 udelay(40);
3168 ap->state = ANEG_STATE_ABILITY_DETECT;
3169 break;
3171 case ANEG_STATE_ABILITY_DETECT:
3172 if (ap->ability_match != 0 && ap->rxconfig != 0) {
3173 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3175 break;
3177 case ANEG_STATE_ACK_DETECT_INIT:
3178 ap->txconfig |= ANEG_CFG_ACK;
3179 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3180 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3181 tw32_f(MAC_MODE, tp->mac_mode);
3182 udelay(40);
3184 ap->state = ANEG_STATE_ACK_DETECT;
3186 /* fallthru */
3187 case ANEG_STATE_ACK_DETECT:
3188 if (ap->ack_match != 0) {
3189 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3190 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3191 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3192 } else {
3193 ap->state = ANEG_STATE_AN_ENABLE;
3195 } else if (ap->ability_match != 0 &&
3196 ap->rxconfig == 0) {
3197 ap->state = ANEG_STATE_AN_ENABLE;
3199 break;
3201 case ANEG_STATE_COMPLETE_ACK_INIT:
3202 if (ap->rxconfig & ANEG_CFG_INVAL) {
3203 ret = ANEG_FAILED;
3204 break;
3206 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3207 MR_LP_ADV_HALF_DUPLEX |
3208 MR_LP_ADV_SYM_PAUSE |
3209 MR_LP_ADV_ASYM_PAUSE |
3210 MR_LP_ADV_REMOTE_FAULT1 |
3211 MR_LP_ADV_REMOTE_FAULT2 |
3212 MR_LP_ADV_NEXT_PAGE |
3213 MR_TOGGLE_RX |
3214 MR_NP_RX);
3215 if (ap->rxconfig & ANEG_CFG_FD)
3216 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3217 if (ap->rxconfig & ANEG_CFG_HD)
3218 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3219 if (ap->rxconfig & ANEG_CFG_PS1)
3220 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3221 if (ap->rxconfig & ANEG_CFG_PS2)
3222 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3223 if (ap->rxconfig & ANEG_CFG_RF1)
3224 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3225 if (ap->rxconfig & ANEG_CFG_RF2)
3226 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3227 if (ap->rxconfig & ANEG_CFG_NP)
3228 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3230 ap->link_time = ap->cur_time;
3232 ap->flags ^= (MR_TOGGLE_TX);
3233 if (ap->rxconfig & 0x0008)
3234 ap->flags |= MR_TOGGLE_RX;
3235 if (ap->rxconfig & ANEG_CFG_NP)
3236 ap->flags |= MR_NP_RX;
3237 ap->flags |= MR_PAGE_RX;
3239 ap->state = ANEG_STATE_COMPLETE_ACK;
3240 ret = ANEG_TIMER_ENAB;
3241 break;
3243 case ANEG_STATE_COMPLETE_ACK:
3244 if (ap->ability_match != 0 &&
3245 ap->rxconfig == 0) {
3246 ap->state = ANEG_STATE_AN_ENABLE;
3247 break;
3249 delta = ap->cur_time - ap->link_time;
3250 if (delta > ANEG_STATE_SETTLE_TIME) {
3251 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3252 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3253 } else {
3254 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3255 !(ap->flags & MR_NP_RX)) {
3256 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3257 } else {
3258 ret = ANEG_FAILED;
3262 break;
3264 case ANEG_STATE_IDLE_DETECT_INIT:
3265 ap->link_time = ap->cur_time;
3266 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3267 tw32_f(MAC_MODE, tp->mac_mode);
3268 udelay(40);
3270 ap->state = ANEG_STATE_IDLE_DETECT;
3271 ret = ANEG_TIMER_ENAB;
3272 break;
3274 case ANEG_STATE_IDLE_DETECT:
3275 if (ap->ability_match != 0 &&
3276 ap->rxconfig == 0) {
3277 ap->state = ANEG_STATE_AN_ENABLE;
3278 break;
3280 delta = ap->cur_time - ap->link_time;
3281 if (delta > ANEG_STATE_SETTLE_TIME) {
3282 /* XXX another gem from the Broadcom driver :( */
3283 ap->state = ANEG_STATE_LINK_OK;
3285 break;
3287 case ANEG_STATE_LINK_OK:
3288 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3289 ret = ANEG_DONE;
3290 break;
3292 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3293 /* ??? unimplemented */
3294 break;
3296 case ANEG_STATE_NEXT_PAGE_WAIT:
3297 /* ??? unimplemented */
3298 break;
3300 default:
3301 ret = ANEG_FAILED;
3302 break;
3305 return ret;
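/* Drive the software fiber autoneg state machine to completion,
 * bounded at roughly 195 ms, and return 1 when it ends in ANEG_DONE
 * with completion, link-OK or a full-duplex partner flagged.
 */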
3308 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3310 int res = 0;
3311 struct tg3_fiber_aneginfo aninfo;
3312 int status = ANEG_FAILED;
3313 unsigned int tick;
3314 u32 tmp;
3316 tw32_f(MAC_TX_AUTO_NEG, 0);
3318 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3319 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3320 udelay(40);
3322 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3323 udelay(40);
3325 memset(&aninfo, 0, sizeof(aninfo));
3326 aninfo.flags |= MR_AN_ENABLE;
3327 aninfo.state = ANEG_STATE_UNKNOWN;
3328 aninfo.cur_time = 0;
3329 tick = 0;
3330 while (++tick < 195000) {
3331 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3332 if (status == ANEG_DONE || status == ANEG_FAILED)
3333 break;
3335 udelay(1);
3338 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3339 tw32_f(MAC_MODE, tp->mac_mode);
3340 udelay(40);
3342 *txflags = aninfo.txconfig;
3343 *rxflags = aninfo.flags;
3345 if (status == ANEG_DONE &&
3346 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3347 MR_LP_ADV_FULL_DUPLEX)))
3348 res = 1;
3350 return res;
3353 static void tg3_init_bcm8002(struct tg3 *tp)
3355 u32 mac_status = tr32(MAC_STATUS);
3356 int i;
3358 /* Reset when initting first time or we have a link. */
3359 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3360 !(mac_status & MAC_STATUS_PCS_SYNCED))
3361 return;
3363 /* Set PLL lock range. */
3364 tg3_writephy(tp, 0x16, 0x8007);
3366 /* SW reset */
3367 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3369 /* Wait for reset to complete. */
3370 /* XXX schedule_timeout() ... */
3371 for (i = 0; i < 500; i++)
3372 udelay(10);
3374 /* Config mode; select PMA/Ch 1 regs. */
3375 tg3_writephy(tp, 0x10, 0x8411);
3377 /* Enable auto-lock and comdet, select txclk for tx. */
3378 tg3_writephy(tp, 0x11, 0x0a10);
3380 tg3_writephy(tp, 0x18, 0x00a0);
3381 tg3_writephy(tp, 0x16, 0x41ff);
3383 /* Assert and deassert POR. */
3384 tg3_writephy(tp, 0x13, 0x0400);
3385 udelay(40);
3386 tg3_writephy(tp, 0x13, 0x0000);
3388 tg3_writephy(tp, 0x11, 0x0a50);
3389 udelay(40);
3390 tg3_writephy(tp, 0x11, 0x0a10);
3392 /* Wait for signal to stabilize */
3393 /* XXX schedule_timeout() ... */
3394 for (i = 0; i < 15000; i++)
3395 udelay(10);
3397 /* Deselect the channel register so we can read the PHYID
3398 * later.
3399 */
3400 tg3_writephy(tp, 0x10, 0x8011);
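/* Fiber link setup when the SG_DIG block performs autonegotiation in
 * hardware.  Returns 1 when the link should be treated as up,
 * including the parallel-detection fallback used when the partner
 * never sends config code words.
 */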
3403 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3405 u16 flowctrl;
3406 u32 sg_dig_ctrl, sg_dig_status;
3407 u32 serdes_cfg, expected_sg_dig_ctrl;
3408 int workaround, port_a;
3409 int current_link_up;
3411 serdes_cfg = 0;
3412 expected_sg_dig_ctrl = 0;
3413 workaround = 0;
3414 port_a = 1;
3415 current_link_up = 0;
3417 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3418 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3419 workaround = 1;
3420 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3421 port_a = 0;
3423 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3424 /* preserve bits 20-23 for voltage regulator */
3425 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3428 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3430 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3431 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3432 if (workaround) {
3433 u32 val = serdes_cfg;
3435 if (port_a)
3436 val |= 0xc010000;
3437 else
3438 val |= 0x4010000;
3439 tw32_f(MAC_SERDES_CFG, val);
3442 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3444 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3445 tg3_setup_flow_control(tp, 0, 0);
3446 current_link_up = 1;
3448 goto out;
3451 /* Want auto-negotiation. */
3452 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3454 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3455 if (flowctrl & ADVERTISE_1000XPAUSE)
3456 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3457 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3458 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3460 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3461 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3462 tp->serdes_counter &&
3463 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3464 MAC_STATUS_RCVD_CFG)) ==
3465 MAC_STATUS_PCS_SYNCED)) {
3466 tp->serdes_counter--;
3467 current_link_up = 1;
3468 goto out;
3470 restart_autoneg:
3471 if (workaround)
3472 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3473 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3474 udelay(5);
3475 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3477 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3478 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3479 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3480 MAC_STATUS_SIGNAL_DET)) {
3481 sg_dig_status = tr32(SG_DIG_STATUS);
3482 mac_status = tr32(MAC_STATUS);
3484 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3485 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3486 u32 local_adv = 0, remote_adv = 0;
3488 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3489 local_adv |= ADVERTISE_1000XPAUSE;
3490 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3491 local_adv |= ADVERTISE_1000XPSE_ASYM;
3493 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3494 remote_adv |= LPA_1000XPAUSE;
3495 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3496 remote_adv |= LPA_1000XPAUSE_ASYM;
3498 tg3_setup_flow_control(tp, local_adv, remote_adv);
3499 current_link_up = 1;
3500 tp->serdes_counter = 0;
3501 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3502 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3503 if (tp->serdes_counter)
3504 tp->serdes_counter--;
3505 else {
3506 if (workaround) {
3507 u32 val = serdes_cfg;
3509 if (port_a)
3510 val |= 0xc010000;
3511 else
3512 val |= 0x4010000;
3514 tw32_f(MAC_SERDES_CFG, val);
3517 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3518 udelay(40);
3520 /* Link parallel detection - link is up */
3521 /* only if we have PCS_SYNC and not */
3522 /* receiving config code words */
3523 mac_status = tr32(MAC_STATUS);
3524 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3525 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3526 tg3_setup_flow_control(tp, 0, 0);
3527 current_link_up = 1;
3528 tp->tg3_flags2 |=
3529 TG3_FLG2_PARALLEL_DETECT;
3530 tp->serdes_counter =
3531 SERDES_PARALLEL_DET_TIMEOUT;
3532 } else
3533 goto restart_autoneg;
3536 } else {
3537 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3538 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3541 out:
3542 return current_link_up;
3545 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3547 int current_link_up = 0;
3549 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3550 goto out;
3552 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3553 u32 txflags, rxflags;
3554 int i;
3556 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3557 u32 local_adv = 0, remote_adv = 0;
3559 if (txflags & ANEG_CFG_PS1)
3560 local_adv |= ADVERTISE_1000XPAUSE;
3561 if (txflags & ANEG_CFG_PS2)
3562 local_adv |= ADVERTISE_1000XPSE_ASYM;
3564 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3565 remote_adv |= LPA_1000XPAUSE;
3566 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3567 remote_adv |= LPA_1000XPAUSE_ASYM;
3569 tg3_setup_flow_control(tp, local_adv, remote_adv);
3571 current_link_up = 1;
3573 for (i = 0; i < 30; i++) {
3574 udelay(20);
3575 tw32_f(MAC_STATUS,
3576 (MAC_STATUS_SYNC_CHANGED |
3577 MAC_STATUS_CFG_CHANGED));
3578 udelay(40);
3579 if ((tr32(MAC_STATUS) &
3580 (MAC_STATUS_SYNC_CHANGED |
3581 MAC_STATUS_CFG_CHANGED)) == 0)
3582 break;
3585 mac_status = tr32(MAC_STATUS);
3586 if (current_link_up == 0 &&
3587 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3588 !(mac_status & MAC_STATUS_RCVD_CFG))
3589 current_link_up = 1;
3590 } else {
3591 tg3_setup_flow_control(tp, 0, 0);
3593 /* Forcing 1000FD link up. */
3594 current_link_up = 1;
3596 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3597 udelay(40);
3599 tw32_f(MAC_MODE, tp->mac_mode);
3600 udelay(40);
3603 out:
3604 return current_link_up;
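/* Top-level fiber link setup: force TBI port mode, run either the
 * hardware or the software autonegotiation path, then update the link
 * LEDs, carrier state and recorded speed/duplex.
 */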
3607 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3609 u32 orig_pause_cfg;
3610 u16 orig_active_speed;
3611 u8 orig_active_duplex;
3612 u32 mac_status;
3613 int current_link_up;
3614 int i;
3616 orig_pause_cfg = tp->link_config.active_flowctrl;
3617 orig_active_speed = tp->link_config.active_speed;
3618 orig_active_duplex = tp->link_config.active_duplex;
3620 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3621 netif_carrier_ok(tp->dev) &&
3622 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3623 mac_status = tr32(MAC_STATUS);
3624 mac_status &= (MAC_STATUS_PCS_SYNCED |
3625 MAC_STATUS_SIGNAL_DET |
3626 MAC_STATUS_CFG_CHANGED |
3627 MAC_STATUS_RCVD_CFG);
3628 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3629 MAC_STATUS_SIGNAL_DET)) {
3630 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3631 MAC_STATUS_CFG_CHANGED));
3632 return 0;
3636 tw32_f(MAC_TX_AUTO_NEG, 0);
3638 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3639 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3640 tw32_f(MAC_MODE, tp->mac_mode);
3641 udelay(40);
3643 if (tp->phy_id == PHY_ID_BCM8002)
3644 tg3_init_bcm8002(tp);
3646 /* Enable link change event even when serdes polling. */
3647 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3648 udelay(40);
3650 current_link_up = 0;
3651 mac_status = tr32(MAC_STATUS);
3653 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3654 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3655 else
3656 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3658 tp->hw_status->status =
3659 (SD_STATUS_UPDATED |
3660 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
3662 for (i = 0; i < 100; i++) {
3663 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3664 MAC_STATUS_CFG_CHANGED));
3665 udelay(5);
3666 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3667 MAC_STATUS_CFG_CHANGED |
3668 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3669 break;
3672 mac_status = tr32(MAC_STATUS);
3673 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3674 current_link_up = 0;
3675 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3676 tp->serdes_counter == 0) {
3677 tw32_f(MAC_MODE, (tp->mac_mode |
3678 MAC_MODE_SEND_CONFIGS));
3679 udelay(1);
3680 tw32_f(MAC_MODE, tp->mac_mode);
3684 if (current_link_up == 1) {
3685 tp->link_config.active_speed = SPEED_1000;
3686 tp->link_config.active_duplex = DUPLEX_FULL;
3687 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3688 LED_CTRL_LNKLED_OVERRIDE |
3689 LED_CTRL_1000MBPS_ON));
3690 } else {
3691 tp->link_config.active_speed = SPEED_INVALID;
3692 tp->link_config.active_duplex = DUPLEX_INVALID;
3693 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3694 LED_CTRL_LNKLED_OVERRIDE |
3695 LED_CTRL_TRAFFIC_OVERRIDE));
3698 if (current_link_up != netif_carrier_ok(tp->dev)) {
3699 if (current_link_up)
3700 netif_carrier_on(tp->dev);
3701 else
3702 netif_carrier_off(tp->dev);
3703 tg3_link_report(tp);
3704 } else {
3705 u32 now_pause_cfg = tp->link_config.active_flowctrl;
3706 if (orig_pause_cfg != now_pause_cfg ||
3707 orig_active_speed != tp->link_config.active_speed ||
3708 orig_active_duplex != tp->link_config.active_duplex)
3709 tg3_link_report(tp);
3712 return 0;
3715 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3717 int current_link_up, err = 0;
3718 u32 bmsr, bmcr;
3719 u16 current_speed;
3720 u8 current_duplex;
3721 u32 local_adv, remote_adv;
3723 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3724 tw32_f(MAC_MODE, tp->mac_mode);
3725 udelay(40);
3727 tw32(MAC_EVENT, 0);
3729 tw32_f(MAC_STATUS,
3730 (MAC_STATUS_SYNC_CHANGED |
3731 MAC_STATUS_CFG_CHANGED |
3732 MAC_STATUS_MI_COMPLETION |
3733 MAC_STATUS_LNKSTATE_CHANGED));
3734 udelay(40);
3736 if (force_reset)
3737 tg3_phy_reset(tp);
3739 current_link_up = 0;
3740 current_speed = SPEED_INVALID;
3741 current_duplex = DUPLEX_INVALID;
3743 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3744 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3745 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3746 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3747 bmsr |= BMSR_LSTATUS;
3748 else
3749 bmsr &= ~BMSR_LSTATUS;
3752 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3754 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
3755 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3756 /* do nothing, just check for link up at the end */
3757 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3758 u32 adv, new_adv;
3760 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3761 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3762 ADVERTISE_1000XPAUSE |
3763 ADVERTISE_1000XPSE_ASYM |
3764 ADVERTISE_SLCT);
3766 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3768 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3769 new_adv |= ADVERTISE_1000XHALF;
3770 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3771 new_adv |= ADVERTISE_1000XFULL;
3773 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3774 tg3_writephy(tp, MII_ADVERTISE, new_adv);
3775 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3776 tg3_writephy(tp, MII_BMCR, bmcr);
3778 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3779 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
3780 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3782 return err;
3784 } else {
3785 u32 new_bmcr;
3787 bmcr &= ~BMCR_SPEED1000;
3788 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3790 if (tp->link_config.duplex == DUPLEX_FULL)
3791 new_bmcr |= BMCR_FULLDPLX;
3793 if (new_bmcr != bmcr) {
3794 /* BMCR_SPEED1000 is a reserved bit that needs
3795 * to be set on write.
3796 */
3797 new_bmcr |= BMCR_SPEED1000;
3799 /* Force a linkdown */
3800 if (netif_carrier_ok(tp->dev)) {
3801 u32 adv;
3803 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3804 adv &= ~(ADVERTISE_1000XFULL |
3805 ADVERTISE_1000XHALF |
3806 ADVERTISE_SLCT);
3807 tg3_writephy(tp, MII_ADVERTISE, adv);
3808 tg3_writephy(tp, MII_BMCR, bmcr |
3809 BMCR_ANRESTART |
3810 BMCR_ANENABLE);
3811 udelay(10);
3812 netif_carrier_off(tp->dev);
3814 tg3_writephy(tp, MII_BMCR, new_bmcr);
3815 bmcr = new_bmcr;
3816 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3817 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3818 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3819 ASIC_REV_5714) {
3820 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3821 bmsr |= BMSR_LSTATUS;
3822 else
3823 bmsr &= ~BMSR_LSTATUS;
3825 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3829 if (bmsr & BMSR_LSTATUS) {
3830 current_speed = SPEED_1000;
3831 current_link_up = 1;
3832 if (bmcr & BMCR_FULLDPLX)
3833 current_duplex = DUPLEX_FULL;
3834 else
3835 current_duplex = DUPLEX_HALF;
3837 local_adv = 0;
3838 remote_adv = 0;
3840 if (bmcr & BMCR_ANENABLE) {
3841 u32 common;
3843 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3844 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3845 common = local_adv & remote_adv;
3846 if (common & (ADVERTISE_1000XHALF |
3847 ADVERTISE_1000XFULL)) {
3848 if (common & ADVERTISE_1000XFULL)
3849 current_duplex = DUPLEX_FULL;
3850 else
3851 current_duplex = DUPLEX_HALF;
3853 else
3854 current_link_up = 0;
3858 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
3859 tg3_setup_flow_control(tp, local_adv, remote_adv);
3861 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3862 if (tp->link_config.active_duplex == DUPLEX_HALF)
3863 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3865 tw32_f(MAC_MODE, tp->mac_mode);
3866 udelay(40);
3868 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3870 tp->link_config.active_speed = current_speed;
3871 tp->link_config.active_duplex = current_duplex;
3873 if (current_link_up != netif_carrier_ok(tp->dev)) {
3874 if (current_link_up)
3875 netif_carrier_on(tp->dev);
3876 else {
3877 netif_carrier_off(tp->dev);
3878 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3880 tg3_link_report(tp);
3882 return err;
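/* Periodic helper for MII-SerDes links: once autonegotiation has had
 * time to finish without bringing the link up, fall back to parallel
 * detection by forcing 1000/full, and return to autoneg when config
 * code words are seen again.
 */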
3885 static void tg3_serdes_parallel_detect(struct tg3 *tp)
3887 if (tp->serdes_counter) {
3888 /* Give autoneg time to complete. */
3889 tp->serdes_counter--;
3890 return;
3892 if (!netif_carrier_ok(tp->dev) &&
3893 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3894 u32 bmcr;
3896 tg3_readphy(tp, MII_BMCR, &bmcr);
3897 if (bmcr & BMCR_ANENABLE) {
3898 u32 phy1, phy2;
3900 /* Select shadow register 0x1f */
3901 tg3_writephy(tp, 0x1c, 0x7c00);
3902 tg3_readphy(tp, 0x1c, &phy1);
3904 /* Select expansion interrupt status register */
3905 tg3_writephy(tp, 0x17, 0x0f01);
3906 tg3_readphy(tp, 0x15, &phy2);
3907 tg3_readphy(tp, 0x15, &phy2);
3909 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3910 /* We have signal detect and not receiving
3911 * config code words, link is up by parallel
3912 * detection.
3913 */
3915 bmcr &= ~BMCR_ANENABLE;
3916 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3917 tg3_writephy(tp, MII_BMCR, bmcr);
3918 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3922 else if (netif_carrier_ok(tp->dev) &&
3923 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3924 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3925 u32 phy2;
3927 /* Select expansion interrupt status register */
3928 tg3_writephy(tp, 0x17, 0x0f01);
3929 tg3_readphy(tp, 0x15, &phy2);
3930 if (phy2 & 0x20) {
3931 u32 bmcr;
3933 /* Config code words received, turn on autoneg. */
3934 tg3_readphy(tp, MII_BMCR, &bmcr);
3935 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3937 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
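/* Dispatch to the copper, fiber or MII-SerDes setup routine for this
 * board, then apply the post-link MAC fix-ups: 5784 AX clock
 * prescaler, half-duplex TX slot time, statistics coalescing ticks and
 * the ASPM power-management threshold workaround.
 */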
3943 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3945 int err;
3947 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3948 err = tg3_setup_fiber_phy(tp, force_reset);
3949 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3950 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3951 } else {
3952 err = tg3_setup_copper_phy(tp, force_reset);
3955 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
3956 u32 val, scale;
3958 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3959 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3960 scale = 65;
3961 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3962 scale = 6;
3963 else
3964 scale = 12;
3966 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3967 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3968 tw32(GRC_MISC_CFG, val);
3971 if (tp->link_config.active_speed == SPEED_1000 &&
3972 tp->link_config.active_duplex == DUPLEX_HALF)
3973 tw32(MAC_TX_LENGTHS,
3974 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3975 (6 << TX_LENGTHS_IPG_SHIFT) |
3976 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3977 else
3978 tw32(MAC_TX_LENGTHS,
3979 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3980 (6 << TX_LENGTHS_IPG_SHIFT) |
3981 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3983 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3984 if (netif_carrier_ok(tp->dev)) {
3985 tw32(HOSTCC_STAT_COAL_TICKS,
3986 tp->coal.stats_block_coalesce_usecs);
3987 } else {
3988 tw32(HOSTCC_STAT_COAL_TICKS, 0);
3992 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3993 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3994 if (!netif_carrier_ok(tp->dev))
3995 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3996 tp->pwrmgmt_thresh;
3997 else
3998 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3999 tw32(PCIE_PWR_MGMT_THRESH, val);
4002 return err;
4005 /* This is called whenever we suspect that the system chipset is re-
4006 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4007 * is bogus tx completions. We try to recover by setting the
4008 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4009 * in the workqueue.
4010 */
4011 static void tg3_tx_recover(struct tg3 *tp)
4013 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4014 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4016 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
4017 "mapped I/O cycles to the network device, attempting to "
4018 "recover. Please report the problem to the driver maintainer "
4019 "and include system chipset information.\n", tp->dev->name);
4021 spin_lock(&tp->lock);
4022 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
4023 spin_unlock(&tp->lock);
4026 static inline u32 tg3_tx_avail(struct tg3 *tp)
4028 smp_mb();
4029 return (tp->tx_pending -
4030 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
4033 /* Tigon3 never reports partial packet sends. So we do not
4034 * need special logic to handle SKBs that have not had all
4035 * of their frags sent yet, like SunGEM does.
4037 static void tg3_tx(struct tg3 *tp)
4039 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
4040 u32 sw_idx = tp->tx_cons;
4042 while (sw_idx != hw_idx) {
4043 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
4044 struct sk_buff *skb = ri->skb;
4045 int i, tx_bug = 0;
4047 if (unlikely(skb == NULL)) {
4048 tg3_tx_recover(tp);
4049 return;
4052 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
4054 ri->skb = NULL;
4056 sw_idx = NEXT_TX(sw_idx);
4058 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4059 ri = &tp->tx_buffers[sw_idx];
4060 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4061 tx_bug = 1;
4062 sw_idx = NEXT_TX(sw_idx);
4065 dev_kfree_skb(skb);
4067 if (unlikely(tx_bug)) {
4068 tg3_tx_recover(tp);
4069 return;
4073 tp->tx_cons = sw_idx;
4075 /* Need to make the tx_cons update visible to tg3_start_xmit()
4076 * before checking for netif_queue_stopped(). Without the
4077 * memory barrier, there is a small possibility that tg3_start_xmit()
4078 * will miss it and cause the queue to be stopped forever.
4080 smp_mb();
4082 if (unlikely(netif_queue_stopped(tp->dev) &&
4083 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
4084 netif_tx_lock(tp->dev);
4085 if (netif_queue_stopped(tp->dev) &&
4086 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
4087 netif_wake_queue(tp->dev);
4088 netif_tx_unlock(tp->dev);
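
The barrier comment above is one half of a lost-wakeup avoidance protocol: the completion path publishes tx_cons and only then tests the stopped flag, while tg3_start_xmit() (further down) stops the queue and immediately re-checks space to catch a racing completion. A condensed sketch of that shape only, with the netif_*() calls stubbed as a flag and a single threshold used on both sides for brevity:

#include <stdio.h>

#define smp_mb()  __sync_synchronize()   /* stand-in for the kernel barrier */
#define RING_MASK 511u
#define THRESH    32u

static unsigned int prod, cons, pending = 500;
static int queue_stopped;

static unsigned int avail(void)
{
        smp_mb();                       /* as in tg3_tx_avail() */
        return pending - ((prod - cons) & RING_MASK);
}

/* Producer side: shape of the tail of tg3_start_xmit(). */
static void producer_post(void)
{
        prod++;
        if (avail() <= THRESH) {
                queue_stopped = 1;      /* netif_stop_queue() */
                if (avail() > THRESH)   /* completion raced in: undo the stop */
                        queue_stopped = 0;
        }
}

/* Consumer side: shape of the tail of tg3_tx(). */
static void consumer_reap(void)
{
        cons++;                         /* tp->tx_cons update */
        smp_mb();                       /* publish before checking the stop flag */
        if (queue_stopped && avail() > THRESH)
                queue_stopped = 0;      /* netif_wake_queue() under netif_tx_lock */
}

int main(void)
{
        producer_post();
        consumer_reap();
        printf("stopped=%d avail=%u\n", queue_stopped, avail());
        return 0;
}
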
4092 /* Returns size of skb allocated or < 0 on error.
4094 * We only need to fill in the address because the other members
4095 * of the RX descriptor are invariant, see tg3_init_rings.
4097 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4098 * posting buffers we only dirty the first cache line of the RX
4099 * descriptor (containing the address). Whereas for the RX status
4100 * buffers the cpu only reads the last cacheline of the RX descriptor
4101 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4103 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
4104 int src_idx, u32 dest_idx_unmasked)
4106 struct tg3_rx_buffer_desc *desc;
4107 struct ring_info *map, *src_map;
4108 struct sk_buff *skb;
4109 dma_addr_t mapping;
4110 int skb_size, dest_idx;
4112 src_map = NULL;
4113 switch (opaque_key) {
4114 case RXD_OPAQUE_RING_STD:
4115 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4116 desc = &tp->rx_std[dest_idx];
4117 map = &tp->rx_std_buffers[dest_idx];
4118 if (src_idx >= 0)
4119 src_map = &tp->rx_std_buffers[src_idx];
4120 skb_size = tp->rx_pkt_buf_sz;
4121 break;
4123 case RXD_OPAQUE_RING_JUMBO:
4124 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4125 desc = &tp->rx_jumbo[dest_idx];
4126 map = &tp->rx_jumbo_buffers[dest_idx];
4127 if (src_idx >= 0)
4128 src_map = &tp->rx_jumbo_buffers[src_idx];
4129 skb_size = RX_JUMBO_PKT_BUF_SZ;
4130 break;
4132 default:
4133 return -EINVAL;
4136 /* Do not overwrite any of the map or rp information
4137 * until we are sure we can commit to a new buffer.
4139 * Callers depend upon this behavior and assume that
4140 * we leave everything unchanged if we fail.
4142 skb = netdev_alloc_skb(tp->dev, skb_size);
4143 if (skb == NULL)
4144 return -ENOMEM;
4146 skb_reserve(skb, tp->rx_offset);
4148 mapping = pci_map_single(tp->pdev, skb->data,
4149 skb_size - tp->rx_offset,
4150 PCI_DMA_FROMDEVICE);
4152 map->skb = skb;
4153 pci_unmap_addr_set(map, mapping, mapping);
4155 if (src_map != NULL)
4156 src_map->skb = NULL;
4158 desc->addr_hi = ((u64)mapping >> 32);
4159 desc->addr_lo = ((u64)mapping & 0xffffffff);
4161 return skb_size;
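
As the comment above notes, nothing in the ring entry is overwritten until the new buffer has been fully allocated and mapped, so a failure leaves the old buffer usable. A tiny sketch of that commit-last ordering, with stand-ins for netdev_alloc_skb() and pci_map_single():

#include <stdint.h>
#include <stdlib.h>

struct rx_entry {
        void      *buf;
        uintptr_t  dma;
};

static int refill_entry(struct rx_entry *e, size_t size)
{
        void *buf = malloc(size);              /* netdev_alloc_skb() stand-in */
        if (!buf)
                return -1;                     /* entry left untouched */

        uintptr_t dma = (uintptr_t)buf;        /* pci_map_single() stand-in */

        e->buf = buf;                          /* commit only after both succeed */
        e->dma = dma;
        return (int)size;                      /* mirrors "returns size of skb" */
}

int main(void)
{
        struct rx_entry e = { 0 };
        int ret = refill_entry(&e, 1536);

        free(e.buf);
        return ret < 0;                        /* 0 on success */
}
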
4164 /* We only need to move over in the address because the other
4165 * members of the RX descriptor are invariant. See notes above
4166 * tg3_alloc_rx_skb for full details.
4168 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
4169 int src_idx, u32 dest_idx_unmasked)
4171 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4172 struct ring_info *src_map, *dest_map;
4173 int dest_idx;
4175 switch (opaque_key) {
4176 case RXD_OPAQUE_RING_STD:
4177 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4178 dest_desc = &tp->rx_std[dest_idx];
4179 dest_map = &tp->rx_std_buffers[dest_idx];
4180 src_desc = &tp->rx_std[src_idx];
4181 src_map = &tp->rx_std_buffers[src_idx];
4182 break;
4184 case RXD_OPAQUE_RING_JUMBO:
4185 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4186 dest_desc = &tp->rx_jumbo[dest_idx];
4187 dest_map = &tp->rx_jumbo_buffers[dest_idx];
4188 src_desc = &tp->rx_jumbo[src_idx];
4189 src_map = &tp->rx_jumbo_buffers[src_idx];
4190 break;
4192 default:
4193 return;
4196 dest_map->skb = src_map->skb;
4197 pci_unmap_addr_set(dest_map, mapping,
4198 pci_unmap_addr(src_map, mapping));
4199 dest_desc->addr_hi = src_desc->addr_hi;
4200 dest_desc->addr_lo = src_desc->addr_lo;
4202 src_map->skb = NULL;
4205 #if TG3_VLAN_TAG_USED
4206 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
4208 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
4210 #endif
4212 /* The RX ring scheme is composed of multiple rings which post fresh
4213 * buffers to the chip, and one special ring the chip uses to report
4214 * status back to the host.
4216 * The special ring reports the status of received packets to the
4217 * host. The chip does not write into the original descriptor the
4218 * RX buffer was obtained from. The chip simply takes the original
4219 * descriptor as provided by the host, updates the status and length
4220 * field, then writes this into the next status ring entry.
4222 * Each ring the host uses to post buffers to the chip is described
4223 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4224 * it is first placed into the on-chip ram. When the packet's length
4225 * is known, it walks down the TG3_BDINFO entries to select the ring.
4226 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4227 * which is within the range of the new packet's length is chosen.
4229 * The "separate ring for rx status" scheme may sound queer, but it makes
4230 * sense from a cache coherency perspective. If only the host writes
4231 * to the buffer post rings, and only the chip writes to the rx status
4232 * rings, then cache lines never move beyond shared-modified state.
4233 * If both the host and chip were to write into the same ring, cache line
4234 * eviction could occur since both entities want it in an exclusive state.
4236 static int tg3_rx(struct tg3 *tp, int budget)
4238 u32 work_mask, rx_std_posted = 0;
4239 u32 sw_idx = tp->rx_rcb_ptr;
4240 u16 hw_idx;
4241 int received;
4243 hw_idx = tp->hw_status->idx[0].rx_producer;
4245 * We need to order the read of hw_idx and the read of
4246 * the opaque cookie.
4248 rmb();
4249 work_mask = 0;
4250 received = 0;
4251 while (sw_idx != hw_idx && budget > 0) {
4252 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
4253 unsigned int len;
4254 struct sk_buff *skb;
4255 dma_addr_t dma_addr;
4256 u32 opaque_key, desc_idx, *post_ptr;
4258 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4259 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4260 if (opaque_key == RXD_OPAQUE_RING_STD) {
4261 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
4262 mapping);
4263 skb = tp->rx_std_buffers[desc_idx].skb;
4264 post_ptr = &tp->rx_std_ptr;
4265 rx_std_posted++;
4266 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4267 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
4268 mapping);
4269 skb = tp->rx_jumbo_buffers[desc_idx].skb;
4270 post_ptr = &tp->rx_jumbo_ptr;
4272 else {
4273 goto next_pkt_nopost;
4276 work_mask |= opaque_key;
4278 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4279 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4280 drop_it:
4281 tg3_recycle_rx(tp, opaque_key,
4282 desc_idx, *post_ptr);
4283 drop_it_no_recycle:
4284 /* Other statistics kept track of by card. */
4285 tp->net_stats.rx_dropped++;
4286 goto next_pkt;
4289 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4290 ETH_FCS_LEN;
4292 if (len > RX_COPY_THRESHOLD
4293 && tp->rx_offset == NET_IP_ALIGN
4294 /* rx_offset will likely not equal NET_IP_ALIGN
4295 * if this is a 5701 card running in PCI-X mode
4296 * [see tg3_get_invariants()]
4299 int skb_size;
4301 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
4302 desc_idx, *post_ptr);
4303 if (skb_size < 0)
4304 goto drop_it;
4306 pci_unmap_single(tp->pdev, dma_addr,
4307 skb_size - tp->rx_offset,
4308 PCI_DMA_FROMDEVICE);
4310 skb_put(skb, len);
4311 } else {
4312 struct sk_buff *copy_skb;
4314 tg3_recycle_rx(tp, opaque_key,
4315 desc_idx, *post_ptr);
4317 copy_skb = netdev_alloc_skb(tp->dev,
4318 len + TG3_RAW_IP_ALIGN);
4319 if (copy_skb == NULL)
4320 goto drop_it_no_recycle;
4322 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4323 skb_put(copy_skb, len);
4324 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4325 skb_copy_from_linear_data(skb, copy_skb->data, len);
4326 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4328 /* We'll reuse the original ring buffer. */
4329 skb = copy_skb;
4332 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4333 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4334 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4335 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4336 skb->ip_summed = CHECKSUM_UNNECESSARY;
4337 else
4338 skb->ip_summed = CHECKSUM_NONE;
4340 skb->protocol = eth_type_trans(skb, tp->dev);
4341 #if TG3_VLAN_TAG_USED
4342 if (tp->vlgrp != NULL &&
4343 desc->type_flags & RXD_FLAG_VLAN) {
4344 tg3_vlan_rx(tp, skb,
4345 desc->err_vlan & RXD_VLAN_MASK);
4346 } else
4347 #endif
4348 netif_receive_skb(skb);
4350 received++;
4351 budget--;
4353 next_pkt:
4354 (*post_ptr)++;
4356 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4357 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4359 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4360 TG3_64BIT_REG_LOW, idx);
4361 work_mask &= ~RXD_OPAQUE_RING_STD;
4362 rx_std_posted = 0;
4364 next_pkt_nopost:
4365 sw_idx++;
4366 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4368 /* Refresh hw_idx to see if there is new work */
4369 if (sw_idx == hw_idx) {
4370 hw_idx = tp->hw_status->idx[0].rx_producer;
4371 rmb();
4375 /* ACK the status ring. */
4376 tp->rx_rcb_ptr = sw_idx;
4377 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
4379 /* Refill RX ring(s). */
4380 if (work_mask & RXD_OPAQUE_RING_STD) {
4381 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
4382 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4383 sw_idx);
4385 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4386 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
4387 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
4388 sw_idx);
4390 mmiowb();
4392 return received;
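
The opaque cookie in each buffer descriptor is what lets the status-ring entry above identify which posting ring and which slot the packet came from: tg3_init_rings() (further down) packs the ring identity and the buffer index into it, and the loop above unpacks both. A standalone round-trip check using illustrative mask and shift values (the real RXD_OPAQUE_* constants are defined in tg3.h):

#include <assert.h>
#include <stdint.h>

/* Illustrative layout: low bits hold the index, higher bits select the ring. */
#define OPAQUE_INDEX_MASK  0x0000ffffu
#define OPAQUE_INDEX_SHIFT 0
#define OPAQUE_RING_STD    0x00010000u
#define OPAQUE_RING_JUMBO  0x00020000u
#define OPAQUE_RING_MASK   0x00030000u

static uint32_t opaque_encode(uint32_t ring, uint32_t idx)
{
        return ring | (idx << OPAQUE_INDEX_SHIFT);
}

int main(void)
{
        uint32_t cookie = opaque_encode(OPAQUE_RING_JUMBO, 137);

        assert((cookie & OPAQUE_RING_MASK) == OPAQUE_RING_JUMBO);
        assert(((cookie & OPAQUE_INDEX_MASK) >> OPAQUE_INDEX_SHIFT) == 137);
        return 0;
}
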
4395 static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
4397 struct tg3_hw_status *sblk = tp->hw_status;
4399 /* handle link change and other phy events */
4400 if (!(tp->tg3_flags &
4401 (TG3_FLAG_USE_LINKCHG_REG |
4402 TG3_FLAG_POLL_SERDES))) {
4403 if (sblk->status & SD_STATUS_LINK_CHG) {
4404 sblk->status = SD_STATUS_UPDATED |
4405 (sblk->status & ~SD_STATUS_LINK_CHG);
4406 spin_lock(&tp->lock);
4407 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4408 tw32_f(MAC_STATUS,
4409 (MAC_STATUS_SYNC_CHANGED |
4410 MAC_STATUS_CFG_CHANGED |
4411 MAC_STATUS_MI_COMPLETION |
4412 MAC_STATUS_LNKSTATE_CHANGED));
4413 udelay(40);
4414 } else
4415 tg3_setup_phy(tp, 0);
4416 spin_unlock(&tp->lock);
4420 /* run TX completion thread */
4421 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
4422 tg3_tx(tp);
4423 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4424 return work_done;
4427 /* run RX thread, within the bounds set by NAPI.
4428 * All RX "locking" is done by ensuring outside
4429 * code synchronizes with tg3->napi.poll()
4431 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
4432 work_done += tg3_rx(tp, budget - work_done);
4434 return work_done;
4437 static int tg3_poll(struct napi_struct *napi, int budget)
4439 struct tg3 *tp = container_of(napi, struct tg3, napi);
4440 int work_done = 0;
4441 struct tg3_hw_status *sblk = tp->hw_status;
4443 while (1) {
4444 work_done = tg3_poll_work(tp, work_done, budget);
4446 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4447 goto tx_recovery;
4449 if (unlikely(work_done >= budget))
4450 break;
4452 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
4453 /* tp->last_tag is used in tg3_restart_ints() below
4454 * to tell the hw how much work has been processed,
4455 * so we must read it before checking for more work.
4457 tp->last_tag = sblk->status_tag;
4458 rmb();
4459 } else
4460 sblk->status &= ~SD_STATUS_UPDATED;
4462 if (likely(!tg3_has_work(tp))) {
4463 netif_rx_complete(napi);
4464 tg3_restart_ints(tp);
4465 break;
4469 return work_done;
4471 tx_recovery:
4472 /* work_done is guaranteed to be less than budget. */
4473 netif_rx_complete(napi);
4474 schedule_work(&tp->reset_task);
4475 return work_done;
4478 static void tg3_irq_quiesce(struct tg3 *tp)
4480 BUG_ON(tp->irq_sync);
4482 tp->irq_sync = 1;
4483 smp_mb();
4485 synchronize_irq(tp->pdev->irq);
4488 static inline int tg3_irq_sync(struct tg3 *tp)
4490 return tp->irq_sync;
4493 /* Fully shutdown all tg3 driver activity elsewhere in the system.
4494 * If irq_sync is non-zero, then the IRQ handler must be synchronized
4495 * with as well. Most of the time, this is not necessary except when
4496 * shutting down the device.
4498 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
4500 spin_lock_bh(&tp->lock);
4501 if (irq_sync)
4502 tg3_irq_quiesce(tp);
4505 static inline void tg3_full_unlock(struct tg3 *tp)
4507 spin_unlock_bh(&tp->lock);
4510 /* One-shot MSI handler - Chip automatically disables interrupt
4511 * after sending MSI so driver doesn't have to do it.
4513 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
4515 struct net_device *dev = dev_id;
4516 struct tg3 *tp = netdev_priv(dev);
4518 prefetch(tp->hw_status);
4519 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4521 if (likely(!tg3_irq_sync(tp)))
4522 netif_rx_schedule(&tp->napi);
4524 return IRQ_HANDLED;
4527 /* MSI ISR - No need to check for interrupt sharing and no need to
4528 * flush status block and interrupt mailbox. PCI ordering rules
4529 * guarantee that MSI will arrive after the status block.
4531 static irqreturn_t tg3_msi(int irq, void *dev_id)
4533 struct net_device *dev = dev_id;
4534 struct tg3 *tp = netdev_priv(dev);
4536 prefetch(tp->hw_status);
4537 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4539 * Writing any value to intr-mbox-0 clears PCI INTA# and
4540 * chip-internal interrupt pending events.
4541 * Writing non-zero to intr-mbox-0 additionally tells the
4542 * NIC to stop sending us irqs, engaging "in-intr-handler"
4543 * event coalescing.
4545 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4546 if (likely(!tg3_irq_sync(tp)))
4547 netif_rx_schedule(&tp->napi);
4549 return IRQ_RETVAL(1);
4552 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
4554 struct net_device *dev = dev_id;
4555 struct tg3 *tp = netdev_priv(dev);
4556 struct tg3_hw_status *sblk = tp->hw_status;
4557 unsigned int handled = 1;
4559 /* In INTx mode, it is possible for the interrupt to arrive at
4560 * the CPU before the status block posted prior to the interrupt.
4561 * Reading the PCI State register will confirm whether the
4562 * interrupt is ours and will flush the status block.
4564 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
4565 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4566 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4567 handled = 0;
4568 goto out;
4573 * Writing any value to intr-mbox-0 clears PCI INTA# and
4574 * chip-internal interrupt pending events.
4575 * Writing non-zero to intr-mbox-0 additionally tells the
4576 * NIC to stop sending us irqs, engaging "in-intr-handler"
4577 * event coalescing.
4579 * Flush the mailbox to de-assert the IRQ immediately to prevent
4580 * spurious interrupts. The flush impacts performance but
4581 * excessive spurious interrupts can be worse in some cases.
4583 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4584 if (tg3_irq_sync(tp))
4585 goto out;
4586 sblk->status &= ~SD_STATUS_UPDATED;
4587 if (likely(tg3_has_work(tp))) {
4588 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4589 netif_rx_schedule(&tp->napi);
4590 } else {
4591 /* No work, shared interrupt perhaps? re-enable
4592 * interrupts, and flush that PCI write
4594 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4595 0x00000000);
4597 out:
4598 return IRQ_RETVAL(handled);
4601 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4603 struct net_device *dev = dev_id;
4604 struct tg3 *tp = netdev_priv(dev);
4605 struct tg3_hw_status *sblk = tp->hw_status;
4606 unsigned int handled = 1;
4608 /* In INTx mode, it is possible for the interrupt to arrive at
4609 * the CPU before the status block posted prior to the interrupt.
4610 * Reading the PCI State register will confirm whether the
4611 * interrupt is ours and will flush the status block.
4613 if (unlikely(sblk->status_tag == tp->last_tag)) {
4614 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4615 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4616 handled = 0;
4617 goto out;
4622 * writing any value to intr-mbox-0 clears PCI INTA# and
4623 * chip-internal interrupt pending events.
4624 * writing non-zero to intr-mbox-0 additionally tells the
4625 * NIC to stop sending us irqs, engaging "in-intr-handler"
4626 * event coalescing.
4628 * Flush the mailbox to de-assert the IRQ immediately to prevent
4629 * spurious interrupts. The flush impacts performance but
4630 * excessive spurious interrupts can be worse in some cases.
4632 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4633 if (tg3_irq_sync(tp))
4634 goto out;
4635 if (netif_rx_schedule_prep(&tp->napi)) {
4636 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4637 /* Update last_tag to mark that this status has been
4638 * seen. Because interrupt may be shared, we may be
4639 * racing with tg3_poll(), so only update last_tag
4640 * if tg3_poll() is not scheduled.
4642 tp->last_tag = sblk->status_tag;
4643 __netif_rx_schedule(&tp->napi);
4645 out:
4646 return IRQ_RETVAL(handled);
4649 /* ISR for interrupt test */
4650 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
4652 struct net_device *dev = dev_id;
4653 struct tg3 *tp = netdev_priv(dev);
4654 struct tg3_hw_status *sblk = tp->hw_status;
4656 if ((sblk->status & SD_STATUS_UPDATED) ||
4657 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4658 tg3_disable_ints(tp);
4659 return IRQ_RETVAL(1);
4661 return IRQ_RETVAL(0);
4664 static int tg3_init_hw(struct tg3 *, int);
4665 static int tg3_halt(struct tg3 *, int, int);
4667 /* Restart hardware after configuration changes, self-test, etc.
4668 * Invoked with tp->lock held.
4670 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
4671 __releases(tp->lock)
4672 __acquires(tp->lock)
4674 int err;
4676 err = tg3_init_hw(tp, reset_phy);
4677 if (err) {
4678 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4679 "aborting.\n", tp->dev->name);
4680 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4681 tg3_full_unlock(tp);
4682 del_timer_sync(&tp->timer);
4683 tp->irq_sync = 0;
4684 napi_enable(&tp->napi);
4685 dev_close(tp->dev);
4686 tg3_full_lock(tp, 0);
4688 return err;
4691 #ifdef CONFIG_NET_POLL_CONTROLLER
4692 static void tg3_poll_controller(struct net_device *dev)
4694 struct tg3 *tp = netdev_priv(dev);
4696 tg3_interrupt(tp->pdev->irq, dev);
4698 #endif
4700 static void tg3_reset_task(struct work_struct *work)
4702 struct tg3 *tp = container_of(work, struct tg3, reset_task);
4703 int err;
4704 unsigned int restart_timer;
4706 tg3_full_lock(tp, 0);
4708 if (!netif_running(tp->dev)) {
4709 tg3_full_unlock(tp);
4710 return;
4713 tg3_full_unlock(tp);
4715 tg3_phy_stop(tp);
4717 tg3_netif_stop(tp);
4719 tg3_full_lock(tp, 1);
4721 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
4722 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
4724 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
4725 tp->write32_tx_mbox = tg3_write32_tx_mbox;
4726 tp->write32_rx_mbox = tg3_write_flush_reg32;
4727 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
4728 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
4731 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
4732 err = tg3_init_hw(tp, 1);
4733 if (err)
4734 goto out;
4736 tg3_netif_start(tp);
4738 if (restart_timer)
4739 mod_timer(&tp->timer, jiffies + 1);
4741 out:
4742 tg3_full_unlock(tp);
4744 if (!err)
4745 tg3_phy_start(tp);
4748 static void tg3_dump_short_state(struct tg3 *tp)
4750 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
4751 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
4752 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
4753 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
4756 static void tg3_tx_timeout(struct net_device *dev)
4758 struct tg3 *tp = netdev_priv(dev);
4760 if (netif_msg_tx_err(tp)) {
4761 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
4762 dev->name);
4763 tg3_dump_short_state(tp);
4766 schedule_work(&tp->reset_task);
4769 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4770 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4772 u32 base = (u32) mapping & 0xffffffff;
4774 return ((base > 0xffffdcc0) &&
4775 (base + len + 8 < base));
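
In 32-bit terms, a buffer crosses a 4 GB boundary exactly when base + len (plus the 8-byte slack used above) wraps around and ends up numerically below base; the 0xffffdcc0 pre-filter cheaply skips mappings that start more than roughly 9 KB below a boundary, which is presumably beyond the largest buffer the driver maps. A worked example with illustrative addresses:

#include <assert.h>
#include <stdint.h>

/* Same test as tg3_4g_overflow_test(), on explicit 32-bit values. */
static int crosses_4g(uint32_t base, uint32_t len)
{
        return base > 0xffffdcc0u && base + len + 8 < base;
}

int main(void)
{
        /* Buffer starting 1000 bytes below a 4 GB boundary, 1536 bytes long:
         * base + len + 8 wraps around, so the DMA span crosses the boundary. */
        assert(crosses_4g(0xfffffc18u, 1536));

        /* Same length but comfortably below the boundary: no wrap. */
        assert(!crosses_4g(0xffff0000u, 1536));
        return 0;
}
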
4778 /* Test for DMA addresses > 40-bit */
4779 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4780 int len)
4782 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4783 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
4784 return (((u64) mapping + len) > DMA_40BIT_MASK);
4785 return 0;
4786 #else
4787 return 0;
4788 #endif
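
The 40-bit check matters only on configurations that can produce physical addresses above 2^40 (hence the CONFIG_HIGHMEM / 64-bit guard); the test itself just compares the end of the mapping against the 40-bit limit. For illustration (DMA_40BIT_LIMIT below mirrors the value of DMA_40BIT_MASK):

#include <assert.h>
#include <stdint.h>

#define DMA_40BIT_LIMIT ((1ULL << 40) - 1)

static int exceeds_40bit(uint64_t mapping, uint32_t len)
{
        return mapping + len > DMA_40BIT_LIMIT;
}

int main(void)
{
        assert(!exceeds_40bit(1ULL << 39, 4096));         /* well inside 40 bits */
        assert(exceeds_40bit((1ULL << 40) - 100, 4096));  /* end spills past 2^40 */
        return 0;
}
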
4791 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4793 /* Workaround 4GB and 40-bit hardware DMA bugs. */
4794 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
4795 u32 last_plus_one, u32 *start,
4796 u32 base_flags, u32 mss)
4798 struct sk_buff *new_skb;
4799 dma_addr_t new_addr = 0;
4800 u32 entry = *start;
4801 int i, ret = 0;
4803 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
4804 new_skb = skb_copy(skb, GFP_ATOMIC);
4805 else {
4806 int more_headroom = 4 - ((unsigned long)skb->data & 3);
4808 new_skb = skb_copy_expand(skb,
4809 skb_headroom(skb) + more_headroom,
4810 skb_tailroom(skb), GFP_ATOMIC);
4813 if (!new_skb) {
4814 ret = -1;
4815 } else {
4816 /* New SKB is guaranteed to be linear. */
4817 entry = *start;
4818 ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
4819 new_addr = skb_shinfo(new_skb)->dma_maps[0];
4821 /* Make sure new skb does not cross any 4G boundaries.
4822 * Drop the packet if it does.
4824 if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
4825 if (!ret)
4826 skb_dma_unmap(&tp->pdev->dev, new_skb,
4827 DMA_TO_DEVICE);
4828 ret = -1;
4829 dev_kfree_skb(new_skb);
4830 new_skb = NULL;
4831 } else {
4832 tg3_set_txd(tp, entry, new_addr, new_skb->len,
4833 base_flags, 1 | (mss << 1));
4834 *start = NEXT_TX(entry);
4838 /* Now clean up the sw ring entries. */
4839 i = 0;
4840 while (entry != last_plus_one) {
4841 if (i == 0) {
4842 tp->tx_buffers[entry].skb = new_skb;
4843 } else {
4844 tp->tx_buffers[entry].skb = NULL;
4846 entry = NEXT_TX(entry);
4847 i++;
4850 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
4851 dev_kfree_skb(skb);
4853 return ret;
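
The 5701 branch above asks skb_copy_expand() for 4 - (data & 3) bytes of extra headroom, i.e. the distance to the next 4-byte boundary (a full 4 bytes when the data is already aligned, which is harmless slack). A quick check of that arithmetic:

#include <assert.h>
#include <stdint.h>

/* Bytes of extra headroom needed so the copied data can start 4-byte aligned. */
static unsigned int headroom_pad(uintptr_t data)
{
        return 4 - (data & 3);
}

int main(void)
{
        assert(headroom_pad(0x1000) == 4);   /* already aligned: full slack */
        assert(headroom_pad(0x1001) == 3);
        assert(headroom_pad(0x1003) == 1);
        return 0;
}
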
4856 static void tg3_set_txd(struct tg3 *tp, int entry,
4857 dma_addr_t mapping, int len, u32 flags,
4858 u32 mss_and_is_end)
4860 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4861 int is_end = (mss_and_is_end & 0x1);
4862 u32 mss = (mss_and_is_end >> 1);
4863 u32 vlan_tag = 0;
4865 if (is_end)
4866 flags |= TXD_FLAG_END;
4867 if (flags & TXD_FLAG_VLAN) {
4868 vlan_tag = flags >> 16;
4869 flags &= 0xffff;
4871 vlan_tag |= (mss << TXD_MSS_SHIFT);
4873 txd->addr_hi = ((u64) mapping >> 32);
4874 txd->addr_lo = ((u64) mapping & 0xffffffff);
4875 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4876 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
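
Callers fold the end-of-packet marker and the MSS into the single mss_and_is_end argument: bit 0 is is_end and the MSS sits in the bits above it, which is why the transmit paths below pass (i == last) | (mss << 1). A tiny round-trip check of the packing:

#include <assert.h>
#include <stdint.h>

static uint32_t pack(int is_end, uint32_t mss)   { return (uint32_t)is_end | (mss << 1); }
static int      unpack_end(uint32_t v)           { return v & 0x1; }
static uint32_t unpack_mss(uint32_t v)           { return v >> 1; }

int main(void)
{
        uint32_t v = pack(1, 1460);       /* last fragment, MSS 1460 */

        assert(unpack_end(v) == 1);
        assert(unpack_mss(v) == 1460);
        return 0;
}
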
4879 /* hard_start_xmit for devices that don't have any bugs and
4880 * support TG3_FLG2_HW_TSO_2 only.
4882 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4884 struct tg3 *tp = netdev_priv(dev);
4885 u32 len, entry, base_flags, mss;
4886 struct skb_shared_info *sp;
4887 dma_addr_t mapping;
4889 len = skb_headlen(skb);
4891 /* We are running in BH disabled context with netif_tx_lock
4892 * and TX reclaim runs via tp->napi.poll inside of a software
4893 * interrupt. Furthermore, IRQ processing runs lockless so we have
4894 * no IRQ context deadlocks to worry about either. Rejoice!
4896 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4897 if (!netif_queue_stopped(dev)) {
4898 netif_stop_queue(dev);
4900 /* This is a hard error, log it. */
4901 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4902 "queue awake!\n", dev->name);
4904 return NETDEV_TX_BUSY;
4907 entry = tp->tx_prod;
4908 base_flags = 0;
4909 mss = 0;
4910 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4911 int tcp_opt_len, ip_tcp_len;
4913 if (skb_header_cloned(skb) &&
4914 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4915 dev_kfree_skb(skb);
4916 goto out_unlock;
4919 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4920 mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4921 else {
4922 struct iphdr *iph = ip_hdr(skb);
4924 tcp_opt_len = tcp_optlen(skb);
4925 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4927 iph->check = 0;
4928 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4929 mss |= (ip_tcp_len + tcp_opt_len) << 9;
4932 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4933 TXD_FLAG_CPU_POST_DMA);
4935 tcp_hdr(skb)->check = 0;
4938 else if (skb->ip_summed == CHECKSUM_PARTIAL)
4939 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4940 #if TG3_VLAN_TAG_USED
4941 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4942 base_flags |= (TXD_FLAG_VLAN |
4943 (vlan_tx_tag_get(skb) << 16));
4944 #endif
4946 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
4947 dev_kfree_skb(skb);
4948 goto out_unlock;
4951 sp = skb_shinfo(skb);
4953 mapping = sp->dma_maps[0];
4955 tp->tx_buffers[entry].skb = skb;
4957 tg3_set_txd(tp, entry, mapping, len, base_flags,
4958 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4960 entry = NEXT_TX(entry);
4962 /* Now loop through additional data fragments, and queue them. */
4963 if (skb_shinfo(skb)->nr_frags > 0) {
4964 unsigned int i, last;
4966 last = skb_shinfo(skb)->nr_frags - 1;
4967 for (i = 0; i <= last; i++) {
4968 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4970 len = frag->size;
4971 mapping = sp->dma_maps[i + 1];
4972 tp->tx_buffers[entry].skb = NULL;
4974 tg3_set_txd(tp, entry, mapping, len,
4975 base_flags, (i == last) | (mss << 1));
4977 entry = NEXT_TX(entry);
4981 /* Packets are ready, update Tx producer idx local and on card. */
4982 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4984 tp->tx_prod = entry;
4985 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4986 netif_stop_queue(dev);
4987 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4988 netif_wake_queue(tp->dev);
4991 out_unlock:
4992 mmiowb();
4994 dev->trans_start = jiffies;
4996 return NETDEV_TX_OK;
4999 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
5001 /* Use GSO to workaround a rare TSO bug that may be triggered when the
5002 * TSO header is greater than 80 bytes.
5004 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5006 struct sk_buff *segs, *nskb;
5008 /* Estimate the number of fragments in the worst case */
5009 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
5010 netif_stop_queue(tp->dev);
5011 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
5012 return NETDEV_TX_BUSY;
5014 netif_wake_queue(tp->dev);
5017 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5018 if (IS_ERR(segs))
5019 goto tg3_tso_bug_end;
5021 do {
5022 nskb = segs;
5023 segs = segs->next;
5024 nskb->next = NULL;
5025 tg3_start_xmit_dma_bug(nskb, tp->dev);
5026 } while (segs);
5028 tg3_tso_bug_end:
5029 dev_kfree_skb(skb);
5031 return NETDEV_TX_OK;
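
The fallback is triggered from tg3_start_xmit_dma_bug() below: when the Ethernet + IP + TCP header (including TCP options) exceeds 80 bytes on a chip with TG3_FLG2_TSO_BUG set, the skb is handed to tg3_tso_bug(), which lets skb_gso_segment() split it in software and resubmits each resulting segment. The size check reduces to the following arithmetic (header lengths are illustrative; ETH_HLEN_X stands in for ETH_HLEN):

#include <assert.h>

#define ETH_HLEN_X   14   /* Ethernet header length */
#define TSO_HDR_MAX  80   /* threshold used by the workaround */

static int needs_tso_workaround(int ip_hdr_len, int tcp_hdr_len, int tcp_opt_len)
{
        return ETH_HLEN_X + ip_hdr_len + tcp_hdr_len + tcp_opt_len > TSO_HDR_MAX;
}

int main(void)
{
        assert(!needs_tso_workaround(20, 20, 12));  /* typical: 66 bytes, fine */
        assert(needs_tso_workaround(60, 20, 12));   /* bloated IP options: 106 bytes */
        return 0;
}
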
5034 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5035 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
5037 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
5039 struct tg3 *tp = netdev_priv(dev);
5040 u32 len, entry, base_flags, mss;
5041 struct skb_shared_info *sp;
5042 int would_hit_hwbug;
5043 dma_addr_t mapping;
5045 len = skb_headlen(skb);
5047 /* We are running in BH disabled context with netif_tx_lock
5048 * and TX reclaim runs via tp->napi.poll inside of a software
5049 * interrupt. Furthermore, IRQ processing runs lockless so we have
5050 * no IRQ context deadlocks to worry about either. Rejoice!
5052 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
5053 if (!netif_queue_stopped(dev)) {
5054 netif_stop_queue(dev);
5056 /* This is a hard error, log it. */
5057 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5058 "queue awake!\n", dev->name);
5060 return NETDEV_TX_BUSY;
5063 entry = tp->tx_prod;
5064 base_flags = 0;
5065 if (skb->ip_summed == CHECKSUM_PARTIAL)
5066 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5067 mss = 0;
5068 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5069 struct iphdr *iph;
5070 int tcp_opt_len, ip_tcp_len, hdr_len;
5072 if (skb_header_cloned(skb) &&
5073 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5074 dev_kfree_skb(skb);
5075 goto out_unlock;
5078 tcp_opt_len = tcp_optlen(skb);
5079 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5081 hdr_len = ip_tcp_len + tcp_opt_len;
5082 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5083 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
5084 return (tg3_tso_bug(tp, skb));
5086 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5087 TXD_FLAG_CPU_POST_DMA);
5089 iph = ip_hdr(skb);
5090 iph->check = 0;
5091 iph->tot_len = htons(mss + hdr_len);
5092 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
5093 tcp_hdr(skb)->check = 0;
5094 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5095 } else
5096 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5097 iph->daddr, 0,
5098 IPPROTO_TCP,
5101 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
5102 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
5103 if (tcp_opt_len || iph->ihl > 5) {
5104 int tsflags;
5106 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5107 mss |= (tsflags << 11);
5109 } else {
5110 if (tcp_opt_len || iph->ihl > 5) {
5111 int tsflags;
5113 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5114 base_flags |= tsflags << 12;
5118 #if TG3_VLAN_TAG_USED
5119 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5120 base_flags |= (TXD_FLAG_VLAN |
5121 (vlan_tx_tag_get(skb) << 16));
5122 #endif
5124 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5125 dev_kfree_skb(skb);
5126 goto out_unlock;
5129 sp = skb_shinfo(skb);
5131 mapping = sp->dma_maps[0];
5133 tp->tx_buffers[entry].skb = skb;
5135 would_hit_hwbug = 0;
5137 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5138 would_hit_hwbug = 1;
5139 else if (tg3_4g_overflow_test(mapping, len))
5140 would_hit_hwbug = 1;
5142 tg3_set_txd(tp, entry, mapping, len, base_flags,
5143 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5145 entry = NEXT_TX(entry);
5147 /* Now loop through additional data fragments, and queue them. */
5148 if (skb_shinfo(skb)->nr_frags > 0) {
5149 unsigned int i, last;
5151 last = skb_shinfo(skb)->nr_frags - 1;
5152 for (i = 0; i <= last; i++) {
5153 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5155 len = frag->size;
5156 mapping = sp->dma_maps[i + 1];
5158 tp->tx_buffers[entry].skb = NULL;
5160 if (tg3_4g_overflow_test(mapping, len))
5161 would_hit_hwbug = 1;
5163 if (tg3_40bit_overflow_test(tp, mapping, len))
5164 would_hit_hwbug = 1;
5166 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5167 tg3_set_txd(tp, entry, mapping, len,
5168 base_flags, (i == last)|(mss << 1));
5169 else
5170 tg3_set_txd(tp, entry, mapping, len,
5171 base_flags, (i == last));
5173 entry = NEXT_TX(entry);
5177 if (would_hit_hwbug) {
5178 u32 last_plus_one = entry;
5179 u32 start;
5181 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5182 start &= (TG3_TX_RING_SIZE - 1);
5184 /* If the workaround fails due to memory/mapping
5185 * failure, silently drop this packet.
5187 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
5188 &start, base_flags, mss))
5189 goto out_unlock;
5191 entry = start;
5194 /* Packets are ready, update Tx producer idx local and on card. */
5195 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
5197 tp->tx_prod = entry;
5198 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
5199 netif_stop_queue(dev);
5200 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
5201 netif_wake_queue(tp->dev);
5204 out_unlock:
5205 mmiowb();
5207 dev->trans_start = jiffies;
5209 return NETDEV_TX_OK;
5212 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5213 int new_mtu)
5215 dev->mtu = new_mtu;
5217 if (new_mtu > ETH_DATA_LEN) {
5218 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5219 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5220 ethtool_op_set_tso(dev, 0);
5222 else
5223 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5224 } else {
5225 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5226 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5227 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5231 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5233 struct tg3 *tp = netdev_priv(dev);
5234 int err;
5236 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5237 return -EINVAL;
5239 if (!netif_running(dev)) {
5240 /* We'll just catch it later when the
5241 * device is up'd.
5243 tg3_set_mtu(dev, tp, new_mtu);
5244 return 0;
5247 tg3_phy_stop(tp);
5249 tg3_netif_stop(tp);
5251 tg3_full_lock(tp, 1);
5253 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5255 tg3_set_mtu(dev, tp, new_mtu);
5257 err = tg3_restart_hw(tp, 0);
5259 if (!err)
5260 tg3_netif_start(tp);
5262 tg3_full_unlock(tp);
5264 if (!err)
5265 tg3_phy_start(tp);
5267 return err;
5270 /* Free up pending packets in all rx/tx rings.
5272 * The chip has been shut down and the driver detached from
5273 * the networking layer, so no interrupts or new tx packets will
5274 * end up in the driver. tp->{tx,}lock is not held and we are not
5275 * in an interrupt context and thus may sleep.
5277 static void tg3_free_rings(struct tg3 *tp)
5279 struct ring_info *rxp;
5280 int i;
5282 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5283 rxp = &tp->rx_std_buffers[i];
5285 if (rxp->skb == NULL)
5286 continue;
5287 pci_unmap_single(tp->pdev,
5288 pci_unmap_addr(rxp, mapping),
5289 tp->rx_pkt_buf_sz - tp->rx_offset,
5290 PCI_DMA_FROMDEVICE);
5291 dev_kfree_skb_any(rxp->skb);
5292 rxp->skb = NULL;
5295 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5296 rxp = &tp->rx_jumbo_buffers[i];
5298 if (rxp->skb == NULL)
5299 continue;
5300 pci_unmap_single(tp->pdev,
5301 pci_unmap_addr(rxp, mapping),
5302 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
5303 PCI_DMA_FROMDEVICE);
5304 dev_kfree_skb_any(rxp->skb);
5305 rxp->skb = NULL;
5308 for (i = 0; i < TG3_TX_RING_SIZE; ) {
5309 struct tx_ring_info *txp;
5310 struct sk_buff *skb;
5312 txp = &tp->tx_buffers[i];
5313 skb = txp->skb;
5315 if (skb == NULL) {
5316 i++;
5317 continue;
5320 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5322 txp->skb = NULL;
5324 i += skb_shinfo(skb)->nr_frags + 1;
5326 dev_kfree_skb_any(skb);
5330 /* Initialize tx/rx rings for packet processing.
5332 * The chip has been shut down and the driver detached from
5333 * the networking layer, so no interrupts or new tx packets will
5334 * end up in the driver. tp->{tx,}lock are held and thus
5335 * we may not sleep.
5337 static int tg3_init_rings(struct tg3 *tp)
5339 u32 i;
5341 /* Free up all the SKBs. */
5342 tg3_free_rings(tp);
5344 /* Zero out all descriptors. */
5345 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
5346 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
5347 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5348 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
5350 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
5351 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
5352 (tp->dev->mtu > ETH_DATA_LEN))
5353 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
5355 /* Initialize invariants of the rings, we only set this
5356 * stuff once. This works because the card does not
5357 * write into the rx buffer posting rings.
5359 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5360 struct tg3_rx_buffer_desc *rxd;
5362 rxd = &tp->rx_std[i];
5363 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
5364 << RXD_LEN_SHIFT;
5365 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5366 rxd->opaque = (RXD_OPAQUE_RING_STD |
5367 (i << RXD_OPAQUE_INDEX_SHIFT));
5370 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5371 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5372 struct tg3_rx_buffer_desc *rxd;
5374 rxd = &tp->rx_jumbo[i];
5375 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
5376 << RXD_LEN_SHIFT;
5377 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5378 RXD_FLAG_JUMBO;
5379 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5380 (i << RXD_OPAQUE_INDEX_SHIFT));
5384 /* Now allocate fresh SKBs for each rx ring. */
5385 for (i = 0; i < tp->rx_pending; i++) {
5386 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
5387 printk(KERN_WARNING PFX
5388 "%s: Using a smaller RX standard ring, "
5389 "only %d out of %d buffers were allocated "
5390 "successfully.\n",
5391 tp->dev->name, i, tp->rx_pending);
5392 if (i == 0)
5393 return -ENOMEM;
5394 tp->rx_pending = i;
5395 break;
5399 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5400 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5401 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
5402 -1, i) < 0) {
5403 printk(KERN_WARNING PFX
5404 "%s: Using a smaller RX jumbo ring, "
5405 "only %d out of %d buffers were "
5406 "allocated successfully.\n",
5407 tp->dev->name, i, tp->rx_jumbo_pending);
5408 if (i == 0) {
5409 tg3_free_rings(tp);
5410 return -ENOMEM;
5412 tp->rx_jumbo_pending = i;
5413 break;
5417 return 0;
5421 * Must not be invoked with interrupt sources disabled and
5422 * the hardware shut down.
5424 static void tg3_free_consistent(struct tg3 *tp)
5426 kfree(tp->rx_std_buffers);
5427 tp->rx_std_buffers = NULL;
5428 if (tp->rx_std) {
5429 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5430 tp->rx_std, tp->rx_std_mapping);
5431 tp->rx_std = NULL;
5433 if (tp->rx_jumbo) {
5434 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5435 tp->rx_jumbo, tp->rx_jumbo_mapping);
5436 tp->rx_jumbo = NULL;
5438 if (tp->rx_rcb) {
5439 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5440 tp->rx_rcb, tp->rx_rcb_mapping);
5441 tp->rx_rcb = NULL;
5443 if (tp->tx_ring) {
5444 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5445 tp->tx_ring, tp->tx_desc_mapping);
5446 tp->tx_ring = NULL;
5448 if (tp->hw_status) {
5449 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5450 tp->hw_status, tp->status_mapping);
5451 tp->hw_status = NULL;
5453 if (tp->hw_stats) {
5454 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5455 tp->hw_stats, tp->stats_mapping);
5456 tp->hw_stats = NULL;
5461 * Must not be invoked with interrupt sources disabled and
5462 * the hardware shut down. Can sleep.
5464 static int tg3_alloc_consistent(struct tg3 *tp)
5466 tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
5467 (TG3_RX_RING_SIZE +
5468 TG3_RX_JUMBO_RING_SIZE)) +
5469 (sizeof(struct tx_ring_info) *
5470 TG3_TX_RING_SIZE),
5471 GFP_KERNEL);
5472 if (!tp->rx_std_buffers)
5473 return -ENOMEM;
5475 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
5476 tp->tx_buffers = (struct tx_ring_info *)
5477 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
5479 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5480 &tp->rx_std_mapping);
5481 if (!tp->rx_std)
5482 goto err_out;
5484 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5485 &tp->rx_jumbo_mapping);
5487 if (!tp->rx_jumbo)
5488 goto err_out;
5490 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5491 &tp->rx_rcb_mapping);
5492 if (!tp->rx_rcb)
5493 goto err_out;
5495 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
5496 &tp->tx_desc_mapping);
5497 if (!tp->tx_ring)
5498 goto err_out;
5500 tp->hw_status = pci_alloc_consistent(tp->pdev,
5501 TG3_HW_STATUS_SIZE,
5502 &tp->status_mapping);
5503 if (!tp->hw_status)
5504 goto err_out;
5506 tp->hw_stats = pci_alloc_consistent(tp->pdev,
5507 sizeof(struct tg3_hw_stats),
5508 &tp->stats_mapping);
5509 if (!tp->hw_stats)
5510 goto err_out;
5512 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5513 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5515 return 0;
5517 err_out:
5518 tg3_free_consistent(tp);
5519 return -ENOMEM;
5522 #define MAX_WAIT_CNT 1000
5524 /* To stop a block, clear the enable bit and poll till it
5525 * clears. tp->lock is held.
5527 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
5529 unsigned int i;
5530 u32 val;
5532 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5533 switch (ofs) {
5534 case RCVLSC_MODE:
5535 case DMAC_MODE:
5536 case MBFREE_MODE:
5537 case BUFMGR_MODE:
5538 case MEMARB_MODE:
5539 /* We can't enable/disable these bits of the
5540 * 5705/5750, just say success.
5542 return 0;
5544 default:
5545 break;
5549 val = tr32(ofs);
5550 val &= ~enable_bit;
5551 tw32_f(ofs, val);
5553 for (i = 0; i < MAX_WAIT_CNT; i++) {
5554 udelay(100);
5555 val = tr32(ofs);
5556 if ((val & enable_bit) == 0)
5557 break;
5560 if (i == MAX_WAIT_CNT && !silent) {
5561 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5562 "ofs=%lx enable_bit=%x\n",
5563 ofs, enable_bit);
5564 return -ENODEV;
5567 return 0;
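
The function above follows a simple clear-and-poll shape: drop the enable bit, then re-read the same register with a bounded wait until the hardware reports the block stopped, returning -ENODEV on timeout. A stripped-down sketch of that shape with a stand-in variable instead of tr32()/tw32() (so the poll succeeds immediately here; real hardware may take several iterations):

#include <stdint.h>

#define MAX_WAIT 1000

static uint32_t reg;                       /* stand-in for one MODE register */

static int stop_block(uint32_t enable_bit)
{
        reg &= ~enable_bit;                /* tw32_f(ofs, val & ~enable_bit) */
        for (int i = 0; i < MAX_WAIT; i++) {
                /* udelay(100) in the real driver */
                if ((reg & enable_bit) == 0)
                        return 0;
        }
        return -1;                         /* -ENODEV after the timeout */
}

int main(void)
{
        return stop_block(1u << 1);        /* 0: block reports stopped */
}
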
5570 /* tp->lock is held. */
5571 static int tg3_abort_hw(struct tg3 *tp, int silent)
5573 int i, err;
5575 tg3_disable_ints(tp);
5577 tp->rx_mode &= ~RX_MODE_ENABLE;
5578 tw32_f(MAC_RX_MODE, tp->rx_mode);
5579 udelay(10);
5581 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
5582 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
5583 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
5584 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
5585 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
5586 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
5588 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
5589 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
5590 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
5591 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
5592 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
5593 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
5594 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
5596 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
5597 tw32_f(MAC_MODE, tp->mac_mode);
5598 udelay(40);
5600 tp->tx_mode &= ~TX_MODE_ENABLE;
5601 tw32_f(MAC_TX_MODE, tp->tx_mode);
5603 for (i = 0; i < MAX_WAIT_CNT; i++) {
5604 udelay(100);
5605 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
5606 break;
5608 if (i >= MAX_WAIT_CNT) {
5609 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
5610 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
5611 tp->dev->name, tr32(MAC_TX_MODE));
5612 err |= -ENODEV;
5615 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
5616 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
5617 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
5619 tw32(FTQ_RESET, 0xffffffff);
5620 tw32(FTQ_RESET, 0x00000000);
5622 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
5623 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
5625 if (tp->hw_status)
5626 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5627 if (tp->hw_stats)
5628 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5630 return err;
5633 /* tp->lock is held. */
5634 static int tg3_nvram_lock(struct tg3 *tp)
5636 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5637 int i;
5639 if (tp->nvram_lock_cnt == 0) {
5640 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5641 for (i = 0; i < 8000; i++) {
5642 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5643 break;
5644 udelay(20);
5646 if (i == 8000) {
5647 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5648 return -ENODEV;
5651 tp->nvram_lock_cnt++;
5653 return 0;
5656 /* tp->lock is held. */
5657 static void tg3_nvram_unlock(struct tg3 *tp)
5659 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5660 if (tp->nvram_lock_cnt > 0)
5661 tp->nvram_lock_cnt--;
5662 if (tp->nvram_lock_cnt == 0)
5663 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
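
The lock/unlock pair above is reference counted: only the outermost tg3_nvram_lock() requests the SWARB grant and only the matching outermost tg3_nvram_unlock() releases it, so nested NVRAM users stay balanced. A sketch of just the counting discipline, with the arbitration register accesses reduced to comments:

struct nvram_stub { int lock_cnt; };

static int nvram_lock(struct nvram_stub *s)
{
        if (s->lock_cnt == 0) {
                /* request SWARB_REQ_SET1 and poll for SWARB_GNT1 here */
        }
        s->lock_cnt++;
        return 0;
}

static void nvram_unlock(struct nvram_stub *s)
{
        if (s->lock_cnt > 0)
                s->lock_cnt--;
        if (s->lock_cnt == 0) {
                /* write SWARB_REQ_CLR1 here to drop the arbitration grant */
        }
}

int main(void)
{
        struct nvram_stub s = { 0 };

        nvram_lock(&s);          /* outer caller takes the hardware grant */
        nvram_lock(&s);          /* nested caller only bumps the counter  */
        nvram_unlock(&s);
        nvram_unlock(&s);        /* grant released only here              */
        return s.lock_cnt;       /* 0 */
}
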
5667 /* tp->lock is held. */
5668 static void tg3_enable_nvram_access(struct tg3 *tp)
5670 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5671 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5672 u32 nvaccess = tr32(NVRAM_ACCESS);
5674 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5678 /* tp->lock is held. */
5679 static void tg3_disable_nvram_access(struct tg3 *tp)
5681 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5682 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5683 u32 nvaccess = tr32(NVRAM_ACCESS);
5685 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5689 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5691 int i;
5692 u32 apedata;
5694 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
5695 if (apedata != APE_SEG_SIG_MAGIC)
5696 return;
5698 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
5699 if (!(apedata & APE_FW_STATUS_READY))
5700 return;
5702 /* Wait for up to 1 millisecond for APE to service previous event. */
5703 for (i = 0; i < 10; i++) {
5704 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
5705 return;
5707 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
5709 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5710 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
5711 event | APE_EVENT_STATUS_EVENT_PENDING);
5713 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
5715 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5716 break;
5718 udelay(100);
5721 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5722 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
5725 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5727 u32 event;
5728 u32 apedata;
5730 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5731 return;
5733 switch (kind) {
5734 case RESET_KIND_INIT:
5735 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5736 APE_HOST_SEG_SIG_MAGIC);
5737 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5738 APE_HOST_SEG_LEN_MAGIC);
5739 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5740 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5741 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5742 APE_HOST_DRIVER_ID_MAGIC);
5743 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5744 APE_HOST_BEHAV_NO_PHYLOCK);
5746 event = APE_EVENT_STATUS_STATE_START;
5747 break;
5748 case RESET_KIND_SHUTDOWN:
5749 /* With the interface we are currently using,
5750 * APE does not track driver state. Wiping
5751 * out the HOST SEGMENT SIGNATURE forces
5752 * the APE to assume OS absent status.
5754 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
5756 event = APE_EVENT_STATUS_STATE_UNLOAD;
5757 break;
5758 case RESET_KIND_SUSPEND:
5759 event = APE_EVENT_STATUS_STATE_SUSPEND;
5760 break;
5761 default:
5762 return;
5765 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5767 tg3_ape_send_event(tp, event);
5770 /* tp->lock is held. */
5771 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5773 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5774 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5776 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5777 switch (kind) {
5778 case RESET_KIND_INIT:
5779 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5780 DRV_STATE_START);
5781 break;
5783 case RESET_KIND_SHUTDOWN:
5784 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5785 DRV_STATE_UNLOAD);
5786 break;
5788 case RESET_KIND_SUSPEND:
5789 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5790 DRV_STATE_SUSPEND);
5791 break;
5793 default:
5794 break;
5798 if (kind == RESET_KIND_INIT ||
5799 kind == RESET_KIND_SUSPEND)
5800 tg3_ape_driver_state_change(tp, kind);
5803 /* tp->lock is held. */
5804 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5806 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5807 switch (kind) {
5808 case RESET_KIND_INIT:
5809 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5810 DRV_STATE_START_DONE);
5811 break;
5813 case RESET_KIND_SHUTDOWN:
5814 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5815 DRV_STATE_UNLOAD_DONE);
5816 break;
5818 default:
5819 break;
5823 if (kind == RESET_KIND_SHUTDOWN)
5824 tg3_ape_driver_state_change(tp, kind);
5827 /* tp->lock is held. */
5828 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5830 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5831 switch (kind) {
5832 case RESET_KIND_INIT:
5833 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5834 DRV_STATE_START);
5835 break;
5837 case RESET_KIND_SHUTDOWN:
5838 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5839 DRV_STATE_UNLOAD);
5840 break;
5842 case RESET_KIND_SUSPEND:
5843 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5844 DRV_STATE_SUSPEND);
5845 break;
5847 default:
5848 break;
5853 static int tg3_poll_fw(struct tg3 *tp)
5855 int i;
5856 u32 val;
5858 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5859 /* Wait up to 20ms for init done. */
5860 for (i = 0; i < 200; i++) {
5861 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5862 return 0;
5863 udelay(100);
5865 return -ENODEV;
5868 /* Wait for firmware initialization to complete. */
5869 for (i = 0; i < 100000; i++) {
5870 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5871 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5872 break;
5873 udelay(10);
5876 /* Chip might not be fitted with firmware. Some Sun onboard
5877 * parts are configured like that. So don't signal the timeout
5878 * of the above loop as an error, but do report the lack of
5879 * running firmware once.
5881 if (i >= 100000 &&
5882 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5883 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5885 printk(KERN_INFO PFX "%s: No firmware running.\n",
5886 tp->dev->name);
5889 return 0;
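
The handshake polled here has two halves: tg3_write_sig_pre_reset() (below) deposits NIC_SRAM_FIRMWARE_MBOX_MAGIC1 before the reset, the bootcode writes back the one's complement of that magic when it finishes, and this function spins until it sees it (the 5906 exposes a VCPU status bit instead). A condensed sketch of the poll with the SRAM mailbox stubbed out (the magic value below is a stand-in, not the real constant):

#include <assert.h>
#include <stdint.h>

#define FW_MBOX_MAGIC1 0x4B657654u     /* stand-in for the firmware mailbox magic */

static uint32_t fw_mbox;               /* stands in for the SRAM mailbox word */
static uint32_t read_mbox(void) { return fw_mbox; }

/* Poll until the bootcode has written back ~MAGIC1 (bounded loop). */
static int poll_fw(int max_iters)
{
        for (int i = 0; i < max_iters; i++) {
                if (read_mbox() == ~FW_MBOX_MAGIC1)
                        return 0;
                /* udelay(10) in the real driver */
        }
        return -1;   /* may simply mean no firmware is fitted (see comment above) */
}

int main(void)
{
        fw_mbox = ~FW_MBOX_MAGIC1;     /* pretend the bootcode completed */
        assert(poll_fw(100000) == 0);
        return 0;
}
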
5892 /* Save PCI command register before chip reset */
5893 static void tg3_save_pci_state(struct tg3 *tp)
5895 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
5898 /* Restore PCI state after chip reset */
5899 static void tg3_restore_pci_state(struct tg3 *tp)
5901 u32 val;
5903 /* Re-enable indirect register accesses. */
5904 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5905 tp->misc_host_ctrl);
5907 /* Set MAX PCI retry to zero. */
5908 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5909 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5910 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5911 val |= PCISTATE_RETRY_SAME_DMA;
5912 /* Allow reads and writes to the APE register and memory space. */
5913 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5914 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5915 PCISTATE_ALLOW_APE_SHMEM_WR;
5916 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5918 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
5920 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
5921 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5922 pcie_set_readrq(tp->pdev, 4096);
5923 else {
5924 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5925 tp->pci_cacheline_sz);
5926 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5927 tp->pci_lat_timer);
5931 /* Make sure PCI-X relaxed ordering bit is clear. */
5932 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
5933 u16 pcix_cmd;
5935 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5936 &pcix_cmd);
5937 pcix_cmd &= ~PCI_X_CMD_ERO;
5938 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5939 pcix_cmd);
5942 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5944 /* Chip reset on 5780 will reset MSI enable bit,
5945 * so need to restore it.
5947 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5948 u16 ctrl;
5950 pci_read_config_word(tp->pdev,
5951 tp->msi_cap + PCI_MSI_FLAGS,
5952 &ctrl);
5953 pci_write_config_word(tp->pdev,
5954 tp->msi_cap + PCI_MSI_FLAGS,
5955 ctrl | PCI_MSI_FLAGS_ENABLE);
5956 val = tr32(MSGINT_MODE);
5957 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5962 static void tg3_stop_fw(struct tg3 *);
5964 /* tp->lock is held. */
5965 static int tg3_chip_reset(struct tg3 *tp)
5967 u32 val;
5968 void (*write_op)(struct tg3 *, u32, u32);
5969 int err;
5971 tg3_nvram_lock(tp);
5973 tg3_mdio_stop(tp);
5975 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
5977 /* No matching tg3_nvram_unlock() after this because
5978 * chip reset below will undo the nvram lock.
5980 tp->nvram_lock_cnt = 0;
5982 /* GRC_MISC_CFG core clock reset will clear the memory
5983 * enable bit in PCI register 4 and the MSI enable bit
5984 * on some chips, so we save relevant registers here.
5986 tg3_save_pci_state(tp);
5988 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
5989 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
5990 tw32(GRC_FASTBOOT_PC, 0);
5993 * We must avoid the readl() that normally takes place.
5994 * It locks machines, causes machine checks, and other
5995 * fun things. So, temporarily disable the 5701
5996 * hardware workaround, while we do the reset.
5998 write_op = tp->write32;
5999 if (write_op == tg3_write_flush_reg32)
6000 tp->write32 = tg3_write32;
6002 /* Prevent the irq handler from reading or writing PCI registers
6003 * during chip reset when the memory enable bit in the PCI command
6004 * register may be cleared. The chip does not generate interrupt
6005 * at this time, but the irq handler may still be called due to irq
6006 * sharing or irqpoll.
6008 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
6009 if (tp->hw_status) {
6010 tp->hw_status->status = 0;
6011 tp->hw_status->status_tag = 0;
6013 tp->last_tag = 0;
6014 smp_mb();
6015 synchronize_irq(tp->pdev->irq);
6017 /* do the reset */
6018 val = GRC_MISC_CFG_CORECLK_RESET;
6020 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
6021 if (tr32(0x7e2c) == 0x60) {
6022 tw32(0x7e2c, 0x20);
6024 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6025 tw32(GRC_MISC_CFG, (1 << 29));
6026 val |= (1 << 29);
6030 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6031 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
6032 tw32(GRC_VCPU_EXT_CTRL,
6033 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
6036 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6037 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
6038 tw32(GRC_MISC_CFG, val);
6040 /* restore 5701 hardware bug workaround write method */
6041 tp->write32 = write_op;
6043 /* Unfortunately, we have to delay before the PCI read back.
6044 * Some 575X chips even will not respond to a PCI cfg access
6045 * when the reset command is given to the chip.
6047 * How do these hardware designers expect things to work
6048 * properly if the PCI write is posted for a long period
6049 * of time? It is always necessary to have some method by
6050 * which a register read back can occur to push the write
6051 * out which does the reset.
6053 * For most tg3 variants the trick below was working.
6054 * Ho hum...
6056 udelay(120);
6058 /* Flush PCI posted writes. The normal MMIO registers
6059 * are inaccessible at this time so this is the only
6060 * way to make this reliably (actually, this is no longer
6061 * the case, see above). I tried to use indirect
6062 * register read/write but this upset some 5701 variants.
6064 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
6066 udelay(120);
6068 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
6069 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
6070 int i;
6071 u32 cfg_val;
6073 /* Wait for link training to complete. */
6074 for (i = 0; i < 5000; i++)
6075 udelay(100);
6077 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
6078 pci_write_config_dword(tp->pdev, 0xc4,
6079 cfg_val | (1 << 15));
6082 /* Set PCIE max payload size to 128 bytes and
6083 * clear the "no snoop" and "relaxed ordering" bits.
6085 pci_write_config_word(tp->pdev,
6086 tp->pcie_cap + PCI_EXP_DEVCTL,
6089 pcie_set_readrq(tp->pdev, 4096);
6091 /* Clear error status */
6092 pci_write_config_word(tp->pdev,
6093 tp->pcie_cap + PCI_EXP_DEVSTA,
6094 PCI_EXP_DEVSTA_CED |
6095 PCI_EXP_DEVSTA_NFED |
6096 PCI_EXP_DEVSTA_FED |
6097 PCI_EXP_DEVSTA_URD);
6100 tg3_restore_pci_state(tp);
6102 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
6104 val = 0;
6105 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
6106 val = tr32(MEMARB_MODE);
6107 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
6109 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
6110 tg3_stop_fw(tp);
6111 tw32(0x5000, 0x400);
6114 tw32(GRC_MODE, tp->grc_mode);
6116 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
6117 val = tr32(0xc4);
6119 tw32(0xc4, val | (1 << 15));
6122 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
6123 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6124 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
6125 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
6126 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
6127 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6130 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6131 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
6132 tw32_f(MAC_MODE, tp->mac_mode);
6133 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6134 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
6135 tw32_f(MAC_MODE, tp->mac_mode);
6136 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6137 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
6138 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
6139 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
6140 tw32_f(MAC_MODE, tp->mac_mode);
6141 } else
6142 tw32_f(MAC_MODE, 0);
6143 udelay(40);
6145 tg3_mdio_start(tp);
6147 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
6149 err = tg3_poll_fw(tp);
6150 if (err)
6151 return err;
6153 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
6154 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6155 val = tr32(0x7c00);
6157 tw32(0x7c00, val | (1 << 25));
6160 /* Reprobe ASF enable state. */
6161 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
6162 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
6163 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6164 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6165 u32 nic_cfg;
6167 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6168 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
6169 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
6170 tp->last_event_jiffies = jiffies;
6171 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
6172 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
6176 return 0;
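/* Pause the ASF management firmware.  This only does anything when
 * ASF is enabled and the APE is not; in that case the driver waits
 * for the RX CPU to ACK any previous event, posts FWCMD_NICDRV_PAUSE_FW
 * in the firmware command mailbox, rings the firmware event doorbell,
 * and waits for that event to be ACKed as well.
 */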
6179 /* tp->lock is held. */
6180 static void tg3_stop_fw(struct tg3 *tp)
6182 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
6183 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
6184 /* Wait for RX cpu to ACK the previous event. */
6185 tg3_wait_for_event_ack(tp);
6187 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
6189 tg3_generate_fw_event(tp);
6191 /* Wait for RX cpu to ACK this event. */
6192 tg3_wait_for_event_ack(tp);
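/* Full controller shutdown: pause the management firmware, post the
 * pre-reset signature, quiesce the MAC and DMA engines, reset the
 * chip, then post the legacy and post-reset signatures (presumably so
 * the boot/ASF firmware can tell what state the driver left behind).
 */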
6196 /* tp->lock is held. */
6197 static int tg3_halt(struct tg3 *tp, int kind, int silent)
6199 int err;
6201 tg3_stop_fw(tp);
6203 tg3_write_sig_pre_reset(tp, kind);
6205 tg3_abort_hw(tp, silent);
6206 err = tg3_chip_reset(tp);
6208 tg3_write_sig_legacy(tp, kind);
6209 tg3_write_sig_post_reset(tp, kind);
6211 if (err)
6212 return err;
6214 return 0;
6217 #define RX_CPU_SCRATCH_BASE 0x30000
6218 #define RX_CPU_SCRATCH_SIZE 0x04000
6219 #define TX_CPU_SCRATCH_BASE 0x34000
6220 #define TX_CPU_SCRATCH_SIZE 0x04000
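/* Halt one of the on-chip firmware CPUs.  5705 and newer parts have
 * no separate TX CPU (hence the BUG_ON), and the 5906 exposes its CPU
 * only through GRC_VCPU_EXT_CTRL.  The halt bit is written repeatedly
 * because, apparently, a single write does not always take effect.
 */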
6222 /* tp->lock is held. */
6223 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6225 int i;
6227 BUG_ON(offset == TX_CPU_BASE &&
6228 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
6230 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6231 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6233 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6234 return 0;
6236 if (offset == RX_CPU_BASE) {
6237 for (i = 0; i < 10000; i++) {
6238 tw32(offset + CPU_STATE, 0xffffffff);
6239 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6240 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6241 break;
6244 tw32(offset + CPU_STATE, 0xffffffff);
6245 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
6246 udelay(10);
6247 } else {
6248 for (i = 0; i < 10000; i++) {
6249 tw32(offset + CPU_STATE, 0xffffffff);
6250 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6251 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6252 break;
6256 if (i >= 10000) {
6257 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
6258 "and %s CPU\n",
6259 tp->dev->name,
6260 (offset == RX_CPU_BASE ? "RX" : "TX"));
6261 return -ENODEV;
6264 /* Clear firmware's nvram arbitration. */
6265 if (tp->tg3_flags & TG3_FLAG_NVRAM)
6266 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
6267 return 0;
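/* Describes a firmware image to be copied into CPU scratch memory:
 * fw_base is the address the image expects to run at, fw_len the
 * number of bytes to copy, fw_data the big-endian image words.
 */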
6270 struct fw_info {
6271 unsigned int fw_base;
6272 unsigned int fw_len;
6273 const __be32 *fw_data;
6276 /* tp->lock is held. */
6277 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6278 int cpu_scratch_size, struct fw_info *info)
6280 int err, lock_err, i;
6281 void (*write_op)(struct tg3 *, u32, u32);
6283 if (cpu_base == TX_CPU_BASE &&
6284 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6285 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6286 "TX cpu firmware on %s which is 5705.\n",
6287 tp->dev->name);
6288 return -EINVAL;
6291 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6292 write_op = tg3_write_mem;
6293 else
6294 write_op = tg3_write_indirect_reg32;
6296 /* It is possible that bootcode is still loading at this point.
6297 * Get the nvram lock first before halting the cpu.
6299 lock_err = tg3_nvram_lock(tp);
6300 err = tg3_halt_cpu(tp, cpu_base);
6301 if (!lock_err)
6302 tg3_nvram_unlock(tp);
6303 if (err)
6304 goto out;
6306 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6307 write_op(tp, cpu_scratch_base + i, 0);
6308 tw32(cpu_base + CPU_STATE, 0xffffffff);
6309 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6310 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
6311 write_op(tp, (cpu_scratch_base +
6312 (info->fw_base & 0xffff) +
6313 (i * sizeof(u32))),
6314 be32_to_cpu(info->fw_data[i]));
6316 err = 0;
6318 out:
6319 return err;
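/* Load the 5701 A0 workaround firmware.  Blob layout, as inferred from
 * the parsing here and in tg3_request_firmware():
 *   fw_data[0]   version
 *   fw_data[1]   load (start) address
 *   fw_data[2]   total length (text + bss)
 *   fw_data[3..] image words, big endian
 * The image is copied into both the RX and TX CPU scratch areas, but
 * only the RX CPU is released from halt.
 */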
6322 /* tp->lock is held. */
6323 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6325 struct fw_info info;
6326 const __be32 *fw_data;
6327 int err, i;
6329 fw_data = (void *)tp->fw->data;
6331 /* Firmware blob starts with version numbers, followed by
6332 start address and length. We are setting complete length.
6333 length = end_address_of_bss - start_address_of_text.
6334 Remainder is the blob to be loaded contiguously
6335 from start address. */
6337 info.fw_base = be32_to_cpu(fw_data[1]);
6338 info.fw_len = tp->fw->size - 12;
6339 info.fw_data = &fw_data[3];
6341 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6342 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6343 &info);
6344 if (err)
6345 return err;
6347 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6348 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6349 &info);
6350 if (err)
6351 return err;
6353 /* Now startup only the RX cpu. */
6354 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6355 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
6357 for (i = 0; i < 5; i++) {
6358 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
6359 break;
6360 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6361 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
6362 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
6363 udelay(1000);
6365 if (i >= 5) {
6366 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
6367 "to set RX CPU PC, is %08x should be %08x\n",
6368 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6369 info.fw_base);
6370 return -ENODEV;
6372 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6373 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
6375 return 0;
6378 /* 5705 needs a special version of the TSO firmware. */
6380 /* tp->lock is held. */
6381 static int tg3_load_tso_firmware(struct tg3 *tp)
6383 struct fw_info info;
6384 const __be32 *fw_data;
6385 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6386 int err, i;
6388 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6389 return 0;
6391 fw_data = (void *)tp->fw->data;
6393 /* Firmware blob starts with version numbers, followed by
6394 start address and length. We are setting complete length.
6395 length = end_address_of_bss - start_address_of_text.
6396 Remainder is the blob to be loaded contiguously
6397 from start address. */
6399 info.fw_base = be32_to_cpu(fw_data[1]);
6400 cpu_scratch_size = tp->fw_len;
6401 info.fw_len = tp->fw->size - 12;
6402 info.fw_data = &fw_data[3];
6404 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6405 cpu_base = RX_CPU_BASE;
6406 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6407 } else {
6408 cpu_base = TX_CPU_BASE;
6409 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6410 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6413 err = tg3_load_firmware_cpu(tp, cpu_base,
6414 cpu_scratch_base, cpu_scratch_size,
6415 &info);
6416 if (err)
6417 return err;
6419 /* Now startup the cpu. */
6420 tw32(cpu_base + CPU_STATE, 0xffffffff);
6421 tw32_f(cpu_base + CPU_PC, info.fw_base);
6423 for (i = 0; i < 5; i++) {
6424 if (tr32(cpu_base + CPU_PC) == info.fw_base)
6425 break;
6426 tw32(cpu_base + CPU_STATE, 0xffffffff);
6427 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
6428 tw32_f(cpu_base + CPU_PC, info.fw_base);
6429 udelay(1000);
6431 if (i >= 5) {
6432 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6433 "to set CPU PC, is %08x should be %08x\n",
6434 tp->dev->name, tr32(cpu_base + CPU_PC),
6435 info.fw_base);
6436 return -ENODEV;
6438 tw32(cpu_base + CPU_STATE, 0xffffffff);
6439 tw32_f(cpu_base + CPU_MODE, 0x00000000);
6440 return 0;
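/* Driver's set_mac_address hook.  The new address is always copied
 * into the net_device; the MAC address registers are only rewritten
 * while the interface is running, and address slot 1 is skipped when
 * the ASF firmware appears to be using it for its own traffic.
 */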
6444 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6446 struct tg3 *tp = netdev_priv(dev);
6447 struct sockaddr *addr = p;
6448 int err = 0, skip_mac_1 = 0;
6450 if (!is_valid_ether_addr(addr->sa_data))
6451 return -EINVAL;
6453 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6455 if (!netif_running(dev))
6456 return 0;
6458 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6459 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6461 addr0_high = tr32(MAC_ADDR_0_HIGH);
6462 addr0_low = tr32(MAC_ADDR_0_LOW);
6463 addr1_high = tr32(MAC_ADDR_1_HIGH);
6464 addr1_low = tr32(MAC_ADDR_1_LOW);
6466 /* Skip MAC addr 1 if ASF is using it. */
6467 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6468 !(addr1_high == 0 && addr1_low == 0))
6469 skip_mac_1 = 1;
6471 spin_lock_bh(&tp->lock);
6472 __tg3_set_mac_addr(tp, skip_mac_1);
6473 spin_unlock_bh(&tp->lock);
6475 return err;
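/* Program one TG3_BDINFO block in NIC SRAM: the 64-bit host DMA
 * address of a ring, its maxlen/flags word, and, on pre-5705 parts,
 * the ring's address inside NIC memory.
 */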
6478 /* tp->lock is held. */
6479 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6480 dma_addr_t mapping, u32 maxlen_flags,
6481 u32 nic_addr)
6483 tg3_write_mem(tp,
6484 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6485 ((u64) mapping >> 32));
6486 tg3_write_mem(tp,
6487 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6488 ((u64) mapping & 0xffffffff));
6489 tg3_write_mem(tp,
6490 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6491 maxlen_flags);
6493 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6494 tg3_write_mem(tp,
6495 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6496 nic_addr);
6499 static void __tg3_set_rx_mode(struct net_device *);
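/* Push an ethtool_coalesce configuration into the host coalescing
 * engine.  The IRQ-time tick registers and the statistics-block tick
 * register are only written on pre-5705 hardware, and the statistics
 * ticks are forced to zero while the link is down.
 */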
6500 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6502 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6503 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6504 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6505 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6506 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6507 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6508 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6510 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6511 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6512 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6513 u32 val = ec->stats_block_coalesce_usecs;
6515 if (!netif_carrier_ok(tp->dev))
6516 val = 0;
6518 tw32(HOSTCC_STAT_COAL_TICKS, val);
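/* Bring the chip from reset to a fully programmed state: reset the
 * core, set up the buffer manager and RX/TX rings, program the DMA
 * engines, coalescing, MAC mode and receive rules, load any CPU
 * firmware this revision needs, and bring up the PHY.  Returns a
 * negative errno on the first failure.
 */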
6522 /* tp->lock is held. */
6523 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6525 u32 val, rdmac_mode;
6526 int i, err, limit;
6528 tg3_disable_ints(tp);
6530 tg3_stop_fw(tp);
6532 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6534 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6535 tg3_abort_hw(tp, 1);
6538 if (reset_phy &&
6539 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
6540 tg3_phy_reset(tp);
6542 err = tg3_chip_reset(tp);
6543 if (err)
6544 return err;
6546 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6548 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
6549 val = tr32(TG3_CPMU_CTRL);
6550 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6551 tw32(TG3_CPMU_CTRL, val);
6553 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
6554 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
6555 val |= CPMU_LSPD_10MB_MACCLK_6_25;
6556 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
6558 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
6559 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
6560 val |= CPMU_LNK_AWARE_MACCLK_6_25;
6561 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
6563 val = tr32(TG3_CPMU_HST_ACC);
6564 val &= ~CPMU_HST_ACC_MACCLK_MASK;
6565 val |= CPMU_HST_ACC_MACCLK_6_25;
6566 tw32(TG3_CPMU_HST_ACC, val);
6569 /* This works around an issue with Athlon chipsets on
6570 * B3 tigon3 silicon. This bit has no effect on any
6571 * other revision. But do not set this on PCI Express
6572 * chips and don't even touch the clocks if the CPMU is present.
6574 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6575 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6576 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6577 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6580 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6581 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6582 val = tr32(TG3PCI_PCISTATE);
6583 val |= PCISTATE_RETRY_SAME_DMA;
6584 tw32(TG3PCI_PCISTATE, val);
6587 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6588 /* Allow reads and writes to the
6589 * APE register and memory space.
6591 val = tr32(TG3PCI_PCISTATE);
6592 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6593 PCISTATE_ALLOW_APE_SHMEM_WR;
6594 tw32(TG3PCI_PCISTATE, val);
6597 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6598 /* Enable some hw fixes. */
6599 val = tr32(TG3PCI_MSI_DATA);
6600 val |= (1 << 26) | (1 << 28) | (1 << 29);
6601 tw32(TG3PCI_MSI_DATA, val);
6604 /* Descriptor ring init may make accesses to the
6605 * NIC SRAM area to setup the TX descriptors, so we
6606 * can only do this after the hardware has been
6607 * successfully reset.
6609 err = tg3_init_rings(tp);
6610 if (err)
6611 return err;
6613 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6614 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
6615 /* This value is determined during the probe time DMA
6616 * engine test, tg3_test_dma.
6618 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6621 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6622 GRC_MODE_4X_NIC_SEND_RINGS |
6623 GRC_MODE_NO_TX_PHDR_CSUM |
6624 GRC_MODE_NO_RX_PHDR_CSUM);
6625 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6627 /* Pseudo-header checksum is done by hardware logic and not
6628 * the offload processors, so make the chip do the pseudo-
6629 * header checksums on receive. For transmit it is more
6630 * convenient to do the pseudo-header checksum in software
6631 * as Linux does that on transmit for us in all cases.
6633 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6635 tw32(GRC_MODE,
6636 tp->grc_mode |
6637 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6639 /* Set up the timer prescaler register. The clock is always 66 MHz. */
6640 val = tr32(GRC_MISC_CFG);
6641 val &= ~0xff;
6642 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6643 tw32(GRC_MISC_CFG, val);
6645 /* Initialize MBUF/DESC pool. */
6646 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6647 /* Do nothing. */
6648 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6649 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6650 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6651 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6652 else
6653 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6654 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6655 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6657 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6658 int fw_len;
6660 fw_len = tp->fw_len;
6661 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6662 tw32(BUFMGR_MB_POOL_ADDR,
6663 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6664 tw32(BUFMGR_MB_POOL_SIZE,
6665 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6668 if (tp->dev->mtu <= ETH_DATA_LEN) {
6669 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6670 tp->bufmgr_config.mbuf_read_dma_low_water);
6671 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6672 tp->bufmgr_config.mbuf_mac_rx_low_water);
6673 tw32(BUFMGR_MB_HIGH_WATER,
6674 tp->bufmgr_config.mbuf_high_water);
6675 } else {
6676 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6677 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6678 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6679 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6680 tw32(BUFMGR_MB_HIGH_WATER,
6681 tp->bufmgr_config.mbuf_high_water_jumbo);
6683 tw32(BUFMGR_DMA_LOW_WATER,
6684 tp->bufmgr_config.dma_low_water);
6685 tw32(BUFMGR_DMA_HIGH_WATER,
6686 tp->bufmgr_config.dma_high_water);
6688 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6689 for (i = 0; i < 2000; i++) {
6690 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6691 break;
6692 udelay(10);
6694 if (i >= 2000) {
6695 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6696 tp->dev->name);
6697 return -ENODEV;
6700 /* Setup replenish threshold. */
6701 val = tp->rx_pending / 8;
6702 if (val == 0)
6703 val = 1;
6704 else if (val > tp->rx_std_max_post)
6705 val = tp->rx_std_max_post;
6706 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6707 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6708 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6710 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6711 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6714 tw32(RCVBDI_STD_THRESH, val);
6716 /* Initialize TG3_BDINFO's at:
6717 * RCVDBDI_STD_BD: standard eth size rx ring
6718 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
6719 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
6721 * like so:
6722 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
6723 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
6724 * ring attribute flags
6725 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
6727 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6728 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6730 * The size of each ring is fixed in the firmware, but the location is
6731 * configurable.
6733 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6734 ((u64) tp->rx_std_mapping >> 32));
6735 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6736 ((u64) tp->rx_std_mapping & 0xffffffff));
6737 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6738 NIC_SRAM_RX_BUFFER_DESC);
6740 /* Don't even try to program the JUMBO/MINI buffer descriptor
6741 * configs on 5705.
6743 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6744 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6745 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6746 } else {
6747 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6748 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6750 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6751 BDINFO_FLAGS_DISABLED);
6753 /* Setup replenish threshold. */
6754 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6756 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6757 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6758 ((u64) tp->rx_jumbo_mapping >> 32));
6759 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6760 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6761 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6762 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6763 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6764 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6765 } else {
6766 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6767 BDINFO_FLAGS_DISABLED);
6772 /* There is only one send ring on 5705/5750, no need to explicitly
6773 * disable the others.
6775 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6776 /* Clear out send RCB ring in SRAM. */
6777 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6778 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6779 BDINFO_FLAGS_DISABLED);
6782 tp->tx_prod = 0;
6783 tp->tx_cons = 0;
6784 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6785 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6787 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6788 tp->tx_desc_mapping,
6789 (TG3_TX_RING_SIZE <<
6790 BDINFO_FLAGS_MAXLEN_SHIFT),
6791 NIC_SRAM_TX_BUFFER_DESC);
6793 /* There is only one receive return ring on 5705/5750, no need
6794 * to explicitly disable the others.
6796 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6797 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6798 i += TG3_BDINFO_SIZE) {
6799 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6800 BDINFO_FLAGS_DISABLED);
6804 tp->rx_rcb_ptr = 0;
6805 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6807 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6808 tp->rx_rcb_mapping,
6809 (TG3_RX_RCB_RING_SIZE(tp) <<
6810 BDINFO_FLAGS_MAXLEN_SHIFT),
6813 tp->rx_std_ptr = tp->rx_pending;
6814 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6815 tp->rx_std_ptr);
6817 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6818 tp->rx_jumbo_pending : 0;
6819 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6820 tp->rx_jumbo_ptr);
6822 /* Initialize MAC address and backoff seed. */
6823 __tg3_set_mac_addr(tp, 0);
6825 /* MTU + ethernet header + FCS + optional VLAN tag */
6826 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6828 /* The slot time is changed by tg3_setup_phy if we
6829 * run at gigabit with half duplex.
6831 tw32(MAC_TX_LENGTHS,
6832 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6833 (6 << TX_LENGTHS_IPG_SHIFT) |
6834 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6836 /* Receive rules. */
6837 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6838 tw32(RCVLPC_CONFIG, 0x0181);
6840 /* Calculate RDMAC_MODE setting early, we need it to determine
6841 * the RCVLPC_STATE_ENABLE mask.
6843 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6844 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6845 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6846 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6847 RDMAC_MODE_LNGREAD_ENAB);
6849 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
6850 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
6851 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
6852 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6853 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6854 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6856 /* If statement applies to 5705 and 5750 PCI devices only */
6857 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6858 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6859 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6860 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6861 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6862 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6863 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6864 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6865 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6869 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6870 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6872 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6873 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
6875 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
6876 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
6877 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
6879 /* Receive/send statistics. */
6880 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6881 val = tr32(RCVLPC_STATS_ENABLE);
6882 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6883 tw32(RCVLPC_STATS_ENABLE, val);
6884 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6885 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6886 val = tr32(RCVLPC_STATS_ENABLE);
6887 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6888 tw32(RCVLPC_STATS_ENABLE, val);
6889 } else {
6890 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6892 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6893 tw32(SNDDATAI_STATSENAB, 0xffffff);
6894 tw32(SNDDATAI_STATSCTRL,
6895 (SNDDATAI_SCTRL_ENABLE |
6896 SNDDATAI_SCTRL_FASTUPD));
6898 /* Setup host coalescing engine. */
6899 tw32(HOSTCC_MODE, 0);
6900 for (i = 0; i < 2000; i++) {
6901 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6902 break;
6903 udelay(10);
6906 __tg3_set_coalesce(tp, &tp->coal);
6908 /* set status block DMA address */
6909 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6910 ((u64) tp->status_mapping >> 32));
6911 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6912 ((u64) tp->status_mapping & 0xffffffff));
6914 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6915 /* Status/statistics block address. See tg3_timer,
6916 * the tg3_periodic_fetch_stats call there, and
6917 * tg3_get_stats to see how this works for 5705/5750 chips.
6919 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6920 ((u64) tp->stats_mapping >> 32));
6921 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6922 ((u64) tp->stats_mapping & 0xffffffff));
6923 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6924 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6927 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6929 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6930 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6931 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6932 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6934 /* Clear statistics/status block in chip, and status block in ram. */
6935 for (i = NIC_SRAM_STATS_BLK;
6936 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6937 i += sizeof(u32)) {
6938 tg3_write_mem(tp, i, 0);
6939 udelay(40);
6941 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6943 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6944 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6945 /* reset to prevent losing 1st rx packet intermittently */
6946 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6947 udelay(10);
6950 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
6951 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
6952 else
6953 tp->mac_mode = 0;
6954 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6955 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6956 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6957 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6958 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
6959 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6960 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6961 udelay(40);
6963 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6964 * If TG3_FLG2_IS_NIC is zero, we should read the
6965 * register to preserve the GPIO settings for LOMs. The GPIOs,
6966 * whether used as inputs or outputs, are set by boot code after
6967 * reset.
6969 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
6970 u32 gpio_mask;
6972 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
6973 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
6974 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
6976 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6977 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6978 GRC_LCLCTRL_GPIO_OUTPUT3;
6980 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6981 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6983 tp->grc_local_ctrl &= ~gpio_mask;
6984 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6986 /* GPIO1 must be driven high for eeprom write protect */
6987 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
6988 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6989 GRC_LCLCTRL_GPIO_OUTPUT1);
6991 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6992 udelay(100);
6994 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6995 tp->last_tag = 0;
6997 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6998 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6999 udelay(40);
7002 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7003 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7004 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7005 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7006 WDMAC_MODE_LNGREAD_ENAB);
7008 /* If statement applies to 5705 and 5750 PCI devices only */
7009 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7010 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7011 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7012 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
7013 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7014 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7015 /* nothing */
7016 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7017 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7018 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7019 val |= WDMAC_MODE_RX_ACCEL;
7023 /* Enable host coalescing bug fix */
7024 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
7025 val |= WDMAC_MODE_STATUS_TAG_FIX;
7027 tw32_f(WDMAC_MODE, val);
7028 udelay(40);
7030 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7031 u16 pcix_cmd;
7033 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7034 &pcix_cmd);
7035 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7036 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7037 pcix_cmd |= PCI_X_CMD_READ_2K;
7038 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7039 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7040 pcix_cmd |= PCI_X_CMD_READ_2K;
7042 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7043 pcix_cmd);
7046 tw32_f(RDMAC_MODE, rdmac_mode);
7047 udelay(40);
7049 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7050 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7051 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7053 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7054 tw32(SNDDATAC_MODE,
7055 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7056 else
7057 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7059 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7060 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7061 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7062 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7063 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7064 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7065 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7066 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7068 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7069 err = tg3_load_5701_a0_firmware_fix(tp);
7070 if (err)
7071 return err;
7074 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7075 err = tg3_load_tso_firmware(tp);
7076 if (err)
7077 return err;
7080 tp->tx_mode = TX_MODE_ENABLE;
7081 tw32_f(MAC_TX_MODE, tp->tx_mode);
7082 udelay(100);
7084 tp->rx_mode = RX_MODE_ENABLE;
7085 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
7086 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7088 tw32_f(MAC_RX_MODE, tp->rx_mode);
7089 udelay(10);
7091 tw32(MAC_LED_CTRL, tp->led_ctrl);
7093 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7094 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7095 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7096 udelay(10);
7098 tw32_f(MAC_RX_MODE, tp->rx_mode);
7099 udelay(10);
7101 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7102 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7103 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7104 /* Set drive transmission level to 1.2V */
7105 /* only if the signal pre-emphasis bit is not set */
7106 val = tr32(MAC_SERDES_CFG);
7107 val &= 0xfffff000;
7108 val |= 0x880;
7109 tw32(MAC_SERDES_CFG, val);
7111 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7112 tw32(MAC_SERDES_CFG, 0x616000);
7115 /* Prevent chip from dropping frames when flow control
7116 * is enabled.
7118 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7120 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7121 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7122 /* Use hardware link auto-negotiation */
7123 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7126 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7127 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7128 u32 tmp;
7130 tmp = tr32(SERDES_RX_CTRL);
7131 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7132 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7133 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7134 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7137 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7138 if (tp->link_config.phy_is_low_power) {
7139 tp->link_config.phy_is_low_power = 0;
7140 tp->link_config.speed = tp->link_config.orig_speed;
7141 tp->link_config.duplex = tp->link_config.orig_duplex;
7142 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7145 err = tg3_setup_phy(tp, 0);
7146 if (err)
7147 return err;
7149 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7150 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7151 u32 tmp;
7153 /* Clear CRC stats. */
7154 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7155 tg3_writephy(tp, MII_TG3_TEST1,
7156 tmp | MII_TG3_TEST1_CRC_EN);
7157 tg3_readphy(tp, 0x14, &tmp);
7162 __tg3_set_rx_mode(tp->dev);
7164 /* Initialize receive rules. */
7165 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
7166 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7167 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
7168 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7170 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7171 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7172 limit = 8;
7173 else
7174 limit = 16;
7175 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7176 limit -= 4;
7177 switch (limit) {
7178 case 16:
7179 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
7180 case 15:
7181 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
7182 case 14:
7183 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
7184 case 13:
7185 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
7186 case 12:
7187 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
7188 case 11:
7189 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
7190 case 10:
7191 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
7192 case 9:
7193 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
7194 case 8:
7195 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
7196 case 7:
7197 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
7198 case 6:
7199 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
7200 case 5:
7201 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
7202 case 4:
7203 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
7204 case 3:
7205 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
7206 case 2:
7207 case 1:
7209 default:
7210 break;
7213 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7214 /* Write our heartbeat update interval to APE. */
7215 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7216 APE_HOST_HEARTBEAT_INT_DISABLE);
7218 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7220 return 0;
7223 /* Called at device open time to get the chip ready for
7224 * packet processing. Invoked with tp->lock held.
7226 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7228 tg3_switch_clocks(tp);
7230 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7232 return tg3_reset_hw(tp, reset_phy);
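/* Fold a 32-bit hardware statistics register into a 64-bit driver
 * counter: the register value is added to the low word and the high
 * word is bumped when the low word wraps.  Used by
 * tg3_periodic_fetch_stats(), which the timer runs once per second on
 * 5705+ parts, apparently because those chips do not DMA a statistics
 * block to host memory the way older parts do.
 */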
7235 #define TG3_STAT_ADD32(PSTAT, REG) \
7236 do { u32 __val = tr32(REG); \
7237 (PSTAT)->low += __val; \
7238 if ((PSTAT)->low < __val) \
7239 (PSTAT)->high += 1; \
7240 } while (0)
7242 static void tg3_periodic_fetch_stats(struct tg3 *tp)
7244 struct tg3_hw_stats *sp = tp->hw_stats;
7246 if (!netif_carrier_ok(tp->dev))
7247 return;
7249 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7250 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7251 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7252 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7253 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7254 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7255 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7256 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7257 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7258 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7259 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7260 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7261 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7263 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7264 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7265 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7266 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7267 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7268 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7269 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7270 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7271 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7272 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7273 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7274 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7275 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7276 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
7278 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7279 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7280 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
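/* Periodic driver timer.  Every tick it works around the non-tagged
 * status interrupt race and checks that the write DMA engine is still
 * enabled (scheduling a reset task if not); roughly once per second it
 * fetches statistics and polls the link; roughly every two seconds it
 * sends the ASF keep-alive heartbeat.
 */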
7283 static void tg3_timer(unsigned long __opaque)
7285 struct tg3 *tp = (struct tg3 *) __opaque;
7287 if (tp->irq_sync)
7288 goto restart_timer;
7290 spin_lock(&tp->lock);
7292 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7293 /* All of this garbage is because, when using non-tagged
7294 * IRQ status, the mailbox/status_block protocol the chip
7295 * uses with the CPU is race prone.
7297 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7298 tw32(GRC_LOCAL_CTRL,
7299 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7300 } else {
7301 tw32(HOSTCC_MODE, tp->coalesce_mode |
7302 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
7305 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7306 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
7307 spin_unlock(&tp->lock);
7308 schedule_work(&tp->reset_task);
7309 return;
7313 /* This part only runs once per second. */
7314 if (!--tp->timer_counter) {
7315 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7316 tg3_periodic_fetch_stats(tp);
7318 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7319 u32 mac_stat;
7320 int phy_event;
7322 mac_stat = tr32(MAC_STATUS);
7324 phy_event = 0;
7325 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7326 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7327 phy_event = 1;
7328 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7329 phy_event = 1;
7331 if (phy_event)
7332 tg3_setup_phy(tp, 0);
7333 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7334 u32 mac_stat = tr32(MAC_STATUS);
7335 int need_setup = 0;
7337 if (netif_carrier_ok(tp->dev) &&
7338 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7339 need_setup = 1;
7341 if (! netif_carrier_ok(tp->dev) &&
7342 (mac_stat & (MAC_STATUS_PCS_SYNCED |
7343 MAC_STATUS_SIGNAL_DET))) {
7344 need_setup = 1;
7346 if (need_setup) {
7347 if (!tp->serdes_counter) {
7348 tw32_f(MAC_MODE,
7349 (tp->mac_mode &
7350 ~MAC_MODE_PORT_MODE_MASK));
7351 udelay(40);
7352 tw32_f(MAC_MODE, tp->mac_mode);
7353 udelay(40);
7355 tg3_setup_phy(tp, 0);
7357 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7358 tg3_serdes_parallel_detect(tp);
7360 tp->timer_counter = tp->timer_multiplier;
7363 /* Heartbeat is only sent once every 2 seconds.
7365 * The heartbeat is to tell the ASF firmware that the host
7366 * driver is still alive. In the event that the OS crashes,
7367 * ASF needs to reset the hardware to free up the FIFO space
7368 * that may be filled with rx packets destined for the host.
7369 * If the FIFO is full, ASF will no longer function properly.
7371 * Unintended resets have been reported on real time kernels
7372 * where the timer doesn't run on time. Netpoll will also have
7373 * the same problem.
7375 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7376 * to check the ring condition when the heartbeat is expiring
7377 * before doing the reset. This will prevent most unintended
7378 * resets.
7380 if (!--tp->asf_counter) {
7381 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7382 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
7383 tg3_wait_for_event_ack(tp);
7385 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
7386 FWCMD_NICDRV_ALIVE3);
7387 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
7388 /* 5 second timeout */
7389 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
7391 tg3_generate_fw_event(tp);
7393 tp->asf_counter = tp->asf_multiplier;
7396 spin_unlock(&tp->lock);
7398 restart_timer:
7399 tp->timer.expires = jiffies + tp->timer_offset;
7400 add_timer(&tp->timer);
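/* Pick the interrupt handler that matches the current mode (MSI,
 * one-shot MSI, tagged-status INTx or plain INTx) along with the
 * request_irq() flags, and register it for the device's IRQ.
 */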
7403 static int tg3_request_irq(struct tg3 *tp)
7405 irq_handler_t fn;
7406 unsigned long flags;
7407 struct net_device *dev = tp->dev;
7409 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7410 fn = tg3_msi;
7411 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7412 fn = tg3_msi_1shot;
7413 flags = IRQF_SAMPLE_RANDOM;
7414 } else {
7415 fn = tg3_interrupt;
7416 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7417 fn = tg3_interrupt_tagged;
7418 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7420 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7423 static int tg3_test_interrupt(struct tg3 *tp)
7425 struct net_device *dev = tp->dev;
7426 int err, i, intr_ok = 0;
7428 if (!netif_running(dev))
7429 return -ENODEV;
7431 tg3_disable_ints(tp);
7433 free_irq(tp->pdev->irq, dev);
7435 err = request_irq(tp->pdev->irq, tg3_test_isr,
7436 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
7437 if (err)
7438 return err;
7440 tp->hw_status->status &= ~SD_STATUS_UPDATED;
7441 tg3_enable_ints(tp);
7443 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7444 HOSTCC_MODE_NOW);
7446 for (i = 0; i < 5; i++) {
7447 u32 int_mbox, misc_host_ctrl;
7449 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7450 TG3_64BIT_REG_LOW);
7451 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7453 if ((int_mbox != 0) ||
7454 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7455 intr_ok = 1;
7456 break;
7459 msleep(10);
7462 tg3_disable_ints(tp);
7464 free_irq(tp->pdev->irq, dev);
7466 err = tg3_request_irq(tp);
7468 if (err)
7469 return err;
7471 if (intr_ok)
7472 return 0;
7474 return -EIO;
7477 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but INTx
7478 * mode is successfully restored.
7480 static int tg3_test_msi(struct tg3 *tp)
7482 struct net_device *dev = tp->dev;
7483 int err;
7484 u16 pci_cmd;
7486 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
7487 return 0;
7489 /* Turn off SERR reporting in case MSI terminates with Master
7490 * Abort.
7492 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7493 pci_write_config_word(tp->pdev, PCI_COMMAND,
7494 pci_cmd & ~PCI_COMMAND_SERR);
7496 err = tg3_test_interrupt(tp);
7498 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7500 if (!err)
7501 return 0;
7503 /* other failures */
7504 if (err != -EIO)
7505 return err;
7507 /* MSI test failed, go back to INTx mode */
7508 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
7509 "switching to INTx mode. Please report this failure to "
7510 "the PCI maintainer and include system chipset information.\n",
7511 tp->dev->name);
7513 free_irq(tp->pdev->irq, dev);
7514 pci_disable_msi(tp->pdev);
7516 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7518 err = tg3_request_irq(tp);
7519 if (err)
7520 return err;
7522 /* Need to reset the chip because the MSI cycle may have terminated
7523 * with Master Abort.
7525 tg3_full_lock(tp, 1);
7527 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7528 err = tg3_init_hw(tp, 1);
7530 tg3_full_unlock(tp);
7532 if (err)
7533 free_irq(tp->pdev->irq, dev);
7535 return err;
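/* Fetch the external firmware image named by tp->fw_needed via
 * request_firmware() and sanity check its header: the declared total
 * length (text + bss) must cover at least the data present after the
 * 12-byte header, otherwise the blob is rejected.
 */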
7538 static int tg3_request_firmware(struct tg3 *tp)
7540 const __be32 *fw_data;
7542 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
7543 printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
7544 tp->dev->name, tp->fw_needed);
7545 return -ENOENT;
7548 fw_data = (void *)tp->fw->data;
7550 /* Firmware blob starts with version numbers, followed by
7551 * start address and _full_ length including BSS sections
7552 * (which must be longer than the actual data, of course).
7555 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
7556 if (tp->fw_len < (tp->fw->size - 12)) {
7557 printk(KERN_ERR "%s: bogus length %d in \"%s\"\n",
7558 tp->dev->name, tp->fw_len, tp->fw_needed);
7559 release_firmware(tp->fw);
7560 tp->fw = NULL;
7561 return -EINVAL;
7564 /* We no longer need firmware; we have it. */
7565 tp->fw_needed = NULL;
7566 return 0;
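/* net_device open routine: load any firmware the chip needs, power up
 * to D0, allocate the DMA-consistent rings and status block, try to
 * enable MSI, program the hardware with tg3_init_hw(), verify that
 * interrupts are actually delivered when MSI is used, then start the
 * timer and the transmit queue.
 */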
7569 static int tg3_open(struct net_device *dev)
7571 struct tg3 *tp = netdev_priv(dev);
7572 int err;
7574 if (tp->fw_needed) {
7575 err = tg3_request_firmware(tp);
7576 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7577 if (err)
7578 return err;
7579 } else if (err) {
7580 printk(KERN_WARNING "%s: TSO capability disabled.\n",
7581 tp->dev->name);
7582 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
7583 } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7584 printk(KERN_NOTICE "%s: TSO capability restored.\n",
7585 tp->dev->name);
7586 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
7590 netif_carrier_off(tp->dev);
7592 err = tg3_set_power_state(tp, PCI_D0);
7593 if (err)
7594 return err;
7596 tg3_full_lock(tp, 0);
7598 tg3_disable_ints(tp);
7599 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
7601 tg3_full_unlock(tp);
7603 /* The placement of this call is tied
7604 * to the setup and use of Host TX descriptors.
7606 err = tg3_alloc_consistent(tp);
7607 if (err)
7608 return err;
7610 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
7611 /* All MSI supporting chips should support tagged
7612 * status. Assert that this is the case.
7614 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7615 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
7616 "Not using MSI.\n", tp->dev->name);
7617 } else if (pci_enable_msi(tp->pdev) == 0) {
7618 u32 msi_mode;
7620 msi_mode = tr32(MSGINT_MODE);
7621 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
7622 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
7625 err = tg3_request_irq(tp);
7627 if (err) {
7628 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7629 pci_disable_msi(tp->pdev);
7630 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7632 tg3_free_consistent(tp);
7633 return err;
7636 napi_enable(&tp->napi);
7638 tg3_full_lock(tp, 0);
7640 err = tg3_init_hw(tp, 1);
7641 if (err) {
7642 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7643 tg3_free_rings(tp);
7644 } else {
7645 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7646 tp->timer_offset = HZ;
7647 else
7648 tp->timer_offset = HZ / 10;
7650 BUG_ON(tp->timer_offset > HZ);
7651 tp->timer_counter = tp->timer_multiplier =
7652 (HZ / tp->timer_offset);
7653 tp->asf_counter = tp->asf_multiplier =
7654 ((HZ / tp->timer_offset) * 2);
7656 init_timer(&tp->timer);
7657 tp->timer.expires = jiffies + tp->timer_offset;
7658 tp->timer.data = (unsigned long) tp;
7659 tp->timer.function = tg3_timer;
7662 tg3_full_unlock(tp);
7664 if (err) {
7665 napi_disable(&tp->napi);
7666 free_irq(tp->pdev->irq, dev);
7667 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7668 pci_disable_msi(tp->pdev);
7669 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7671 tg3_free_consistent(tp);
7672 return err;
7675 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7676 err = tg3_test_msi(tp);
7678 if (err) {
7679 tg3_full_lock(tp, 0);
7681 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7682 pci_disable_msi(tp->pdev);
7683 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7685 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7686 tg3_free_rings(tp);
7687 tg3_free_consistent(tp);
7689 tg3_full_unlock(tp);
7691 napi_disable(&tp->napi);
7693 return err;
7696 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7697 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
7698 u32 val = tr32(PCIE_TRANSACTION_CFG);
7700 tw32(PCIE_TRANSACTION_CFG,
7701 val | PCIE_TRANS_CFG_1SHOT_MSI);
7706 tg3_phy_start(tp);
7708 tg3_full_lock(tp, 0);
7710 add_timer(&tp->timer);
7711 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
7712 tg3_enable_ints(tp);
7714 tg3_full_unlock(tp);
7716 netif_start_queue(dev);
7718 return 0;
7721 #if 0
7722 /*static*/ void tg3_dump_state(struct tg3 *tp)
7724 u32 val32, val32_2, val32_3, val32_4, val32_5;
7725 u16 val16;
7726 int i;
7728 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
7729 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
7730 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
7731 val16, val32);
7733 /* MAC block */
7734 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7735 tr32(MAC_MODE), tr32(MAC_STATUS));
7736 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7737 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
7738 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7739 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
7740 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7741 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
7743 /* Send data initiator control block */
7744 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7745 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7746 printk(" SNDDATAI_STATSCTRL[%08x]\n",
7747 tr32(SNDDATAI_STATSCTRL));
7749 /* Send data completion control block */
7750 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7752 /* Send BD ring selector block */
7753 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7754 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7756 /* Send BD initiator control block */
7757 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7758 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7760 /* Send BD completion control block */
7761 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7763 /* Receive list placement control block */
7764 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7765 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7766 printk(" RCVLPC_STATSCTRL[%08x]\n",
7767 tr32(RCVLPC_STATSCTRL));
7769 /* Receive data and receive BD initiator control block */
7770 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7771 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7773 /* Receive data completion control block */
7774 printk("DEBUG: RCVDCC_MODE[%08x]\n",
7775 tr32(RCVDCC_MODE));
7777 /* Receive BD initiator control block */
7778 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7779 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7781 /* Receive BD completion control block */
7782 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7783 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7785 /* Receive list selector control block */
7786 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7787 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7789 /* Mbuf cluster free block */
7790 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7791 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7793 /* Host coalescing control block */
7794 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7795 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7796 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7797 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7798 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7799 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7800 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7801 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7802 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7803 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7804 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7805 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7807 /* Memory arbiter control block */
7808 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7809 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7811 /* Buffer manager control block */
7812 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7813 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7814 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7815 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7816 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7817 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7818 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7819 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7821 /* Read DMA control block */
7822 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7823 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7825 /* Write DMA control block */
7826 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7827 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7829 /* DMA completion block */
7830 printk("DEBUG: DMAC_MODE[%08x]\n",
7831 tr32(DMAC_MODE));
7833 /* GRC block */
7834 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7835 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7836 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7837 tr32(GRC_LOCAL_CTRL));
7839 /* TG3_BDINFOs */
7840 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7841 tr32(RCVDBDI_JUMBO_BD + 0x0),
7842 tr32(RCVDBDI_JUMBO_BD + 0x4),
7843 tr32(RCVDBDI_JUMBO_BD + 0x8),
7844 tr32(RCVDBDI_JUMBO_BD + 0xc));
7845 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7846 tr32(RCVDBDI_STD_BD + 0x0),
7847 tr32(RCVDBDI_STD_BD + 0x4),
7848 tr32(RCVDBDI_STD_BD + 0x8),
7849 tr32(RCVDBDI_STD_BD + 0xc));
7850 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7851 tr32(RCVDBDI_MINI_BD + 0x0),
7852 tr32(RCVDBDI_MINI_BD + 0x4),
7853 tr32(RCVDBDI_MINI_BD + 0x8),
7854 tr32(RCVDBDI_MINI_BD + 0xc));
7856 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7857 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7858 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7859 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7860 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7861 val32, val32_2, val32_3, val32_4);
7863 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7864 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7865 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7866 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7867 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7868 val32, val32_2, val32_3, val32_4);
7870 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7871 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7872 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7873 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7874 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7875 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7876 val32, val32_2, val32_3, val32_4, val32_5);
7878 /* SW status block */
7879 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7880 tp->hw_status->status,
7881 tp->hw_status->status_tag,
7882 tp->hw_status->rx_jumbo_consumer,
7883 tp->hw_status->rx_consumer,
7884 tp->hw_status->rx_mini_consumer,
7885 tp->hw_status->idx[0].rx_producer,
7886 tp->hw_status->idx[0].tx_consumer);
7888 /* SW statistics block */
7889 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7890 ((u32 *)tp->hw_stats)[0],
7891 ((u32 *)tp->hw_stats)[1],
7892 ((u32 *)tp->hw_stats)[2],
7893 ((u32 *)tp->hw_stats)[3]);
7895 /* Mailboxes */
7896 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7897 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7898 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7899 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7900 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7902 /* NIC side send descriptors. */
7903 for (i = 0; i < 6; i++) {
7904 unsigned long txd;
7906 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7907 + (i * sizeof(struct tg3_tx_buffer_desc));
7908 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7910 readl(txd + 0x0), readl(txd + 0x4),
7911 readl(txd + 0x8), readl(txd + 0xc));
7914 /* NIC side RX descriptors. */
7915 for (i = 0; i < 6; i++) {
7916 unsigned long rxd;
7918 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7919 + (i * sizeof(struct tg3_rx_buffer_desc));
7920 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7922 readl(rxd + 0x0), readl(rxd + 0x4),
7923 readl(rxd + 0x8), readl(rxd + 0xc));
7924 rxd += (4 * sizeof(u32));
7925 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7927 readl(rxd + 0x0), readl(rxd + 0x4),
7928 readl(rxd + 0x8), readl(rxd + 0xc));
7931 for (i = 0; i < 6; i++) {
7932 unsigned long rxd;
7934 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7935 + (i * sizeof(struct tg3_rx_buffer_desc));
7936 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7938 readl(rxd + 0x0), readl(rxd + 0x4),
7939 readl(rxd + 0x8), readl(rxd + 0xc));
7940 rxd += (4 * sizeof(u32));
7941 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7943 readl(rxd + 0x0), readl(rxd + 0x4),
7944 readl(rxd + 0x8), readl(rxd + 0xc));
7947 #endif
7949 static struct net_device_stats *tg3_get_stats(struct net_device *);
7950 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7952 static int tg3_close(struct net_device *dev)
7954 struct tg3 *tp = netdev_priv(dev);
7956 napi_disable(&tp->napi);
7957 cancel_work_sync(&tp->reset_task);
7959 netif_stop_queue(dev);
7961 del_timer_sync(&tp->timer);
7963 tg3_full_lock(tp, 1);
7964 #if 0
7965 tg3_dump_state(tp);
7966 #endif
7968 tg3_disable_ints(tp);
7970 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7971 tg3_free_rings(tp);
7972 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
7974 tg3_full_unlock(tp);
7976 free_irq(tp->pdev->irq, dev);
7977 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7978 pci_disable_msi(tp->pdev);
7979 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7982 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7983 sizeof(tp->net_stats_prev));
7984 memcpy(&tp->estats_prev, tg3_get_estats(tp),
7985 sizeof(tp->estats_prev));
7987 tg3_free_consistent(tp);
7989 tg3_set_power_state(tp, PCI_D3hot);
7991 netif_carrier_off(tp->dev);
7993 return 0;
7996 static inline unsigned long get_stat64(tg3_stat64_t *val)
7998 unsigned long ret;
8000 #if (BITS_PER_LONG == 32)
8001 ret = val->low;
8002 #else
8003 ret = ((u64)val->high << 32) | ((u64)val->low);
8004 #endif
8005 return ret;
8008 static inline u64 get_estat64(tg3_stat64_t *val)
8010 return ((u64)val->high << 32) | ((u64)val->low);
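/* On 5700/5701 copper devices the CRC error count is taken from the PHY:
 * MII_TG3_TEST1 enables the counter and register 0x14 reports it
 * (presumably clearing on read, hence the accumulation into
 * tp->phy_crc_errors).  All other chips return the MAC's rx_fcs_errors
 * statistic directly.
 */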
8013 static unsigned long calc_crc_errors(struct tg3 *tp)
8015 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8017 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8018 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8019 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
8020 u32 val;
8022 spin_lock_bh(&tp->lock);
8023 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
8024 tg3_writephy(tp, MII_TG3_TEST1,
8025 val | MII_TG3_TEST1_CRC_EN);
8026 tg3_readphy(tp, 0x14, &val);
8027 } else
8028 val = 0;
8029 spin_unlock_bh(&tp->lock);
8031 tp->phy_crc_errors += val;
8033 return tp->phy_crc_errors;
8036 return get_stat64(&hw_stats->rx_fcs_errors);
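/* Each ethtool statistic is the snapshot saved at the last close
 * (tp->estats_prev) plus the live hardware counter, so the reported
 * values keep growing across down/up cycles even though the chip's
 * counters are reset.
 */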
8039 #define ESTAT_ADD(member) \
8040 estats->member = old_estats->member + \
8041 get_estat64(&hw_stats->member)
8043 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8045 struct tg3_ethtool_stats *estats = &tp->estats;
8046 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
8047 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8049 if (!hw_stats)
8050 return old_estats;
8052 ESTAT_ADD(rx_octets);
8053 ESTAT_ADD(rx_fragments);
8054 ESTAT_ADD(rx_ucast_packets);
8055 ESTAT_ADD(rx_mcast_packets);
8056 ESTAT_ADD(rx_bcast_packets);
8057 ESTAT_ADD(rx_fcs_errors);
8058 ESTAT_ADD(rx_align_errors);
8059 ESTAT_ADD(rx_xon_pause_rcvd);
8060 ESTAT_ADD(rx_xoff_pause_rcvd);
8061 ESTAT_ADD(rx_mac_ctrl_rcvd);
8062 ESTAT_ADD(rx_xoff_entered);
8063 ESTAT_ADD(rx_frame_too_long_errors);
8064 ESTAT_ADD(rx_jabbers);
8065 ESTAT_ADD(rx_undersize_packets);
8066 ESTAT_ADD(rx_in_length_errors);
8067 ESTAT_ADD(rx_out_length_errors);
8068 ESTAT_ADD(rx_64_or_less_octet_packets);
8069 ESTAT_ADD(rx_65_to_127_octet_packets);
8070 ESTAT_ADD(rx_128_to_255_octet_packets);
8071 ESTAT_ADD(rx_256_to_511_octet_packets);
8072 ESTAT_ADD(rx_512_to_1023_octet_packets);
8073 ESTAT_ADD(rx_1024_to_1522_octet_packets);
8074 ESTAT_ADD(rx_1523_to_2047_octet_packets);
8075 ESTAT_ADD(rx_2048_to_4095_octet_packets);
8076 ESTAT_ADD(rx_4096_to_8191_octet_packets);
8077 ESTAT_ADD(rx_8192_to_9022_octet_packets);
8079 ESTAT_ADD(tx_octets);
8080 ESTAT_ADD(tx_collisions);
8081 ESTAT_ADD(tx_xon_sent);
8082 ESTAT_ADD(tx_xoff_sent);
8083 ESTAT_ADD(tx_flow_control);
8084 ESTAT_ADD(tx_mac_errors);
8085 ESTAT_ADD(tx_single_collisions);
8086 ESTAT_ADD(tx_mult_collisions);
8087 ESTAT_ADD(tx_deferred);
8088 ESTAT_ADD(tx_excessive_collisions);
8089 ESTAT_ADD(tx_late_collisions);
8090 ESTAT_ADD(tx_collide_2times);
8091 ESTAT_ADD(tx_collide_3times);
8092 ESTAT_ADD(tx_collide_4times);
8093 ESTAT_ADD(tx_collide_5times);
8094 ESTAT_ADD(tx_collide_6times);
8095 ESTAT_ADD(tx_collide_7times);
8096 ESTAT_ADD(tx_collide_8times);
8097 ESTAT_ADD(tx_collide_9times);
8098 ESTAT_ADD(tx_collide_10times);
8099 ESTAT_ADD(tx_collide_11times);
8100 ESTAT_ADD(tx_collide_12times);
8101 ESTAT_ADD(tx_collide_13times);
8102 ESTAT_ADD(tx_collide_14times);
8103 ESTAT_ADD(tx_collide_15times);
8104 ESTAT_ADD(tx_ucast_packets);
8105 ESTAT_ADD(tx_mcast_packets);
8106 ESTAT_ADD(tx_bcast_packets);
8107 ESTAT_ADD(tx_carrier_sense_errors);
8108 ESTAT_ADD(tx_discards);
8109 ESTAT_ADD(tx_errors);
8111 ESTAT_ADD(dma_writeq_full);
8112 ESTAT_ADD(dma_write_prioq_full);
8113 ESTAT_ADD(rxbds_empty);
8114 ESTAT_ADD(rx_discards);
8115 ESTAT_ADD(rx_errors);
8116 ESTAT_ADD(rx_threshold_hit);
8118 ESTAT_ADD(dma_readq_full);
8119 ESTAT_ADD(dma_read_prioq_full);
8120 ESTAT_ADD(tx_comp_queue_full);
8122 ESTAT_ADD(ring_set_send_prod_index);
8123 ESTAT_ADD(ring_status_update);
8124 ESTAT_ADD(nic_irqs);
8125 ESTAT_ADD(nic_avoided_irqs);
8126 ESTAT_ADD(nic_tx_threshold_hit);
8128 return estats;
8131 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8133 struct tg3 *tp = netdev_priv(dev);
8134 struct net_device_stats *stats = &tp->net_stats;
8135 struct net_device_stats *old_stats = &tp->net_stats_prev;
8136 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8138 if (!hw_stats)
8139 return old_stats;
8141 stats->rx_packets = old_stats->rx_packets +
8142 get_stat64(&hw_stats->rx_ucast_packets) +
8143 get_stat64(&hw_stats->rx_mcast_packets) +
8144 get_stat64(&hw_stats->rx_bcast_packets);
8146 stats->tx_packets = old_stats->tx_packets +
8147 get_stat64(&hw_stats->tx_ucast_packets) +
8148 get_stat64(&hw_stats->tx_mcast_packets) +
8149 get_stat64(&hw_stats->tx_bcast_packets);
8151 stats->rx_bytes = old_stats->rx_bytes +
8152 get_stat64(&hw_stats->rx_octets);
8153 stats->tx_bytes = old_stats->tx_bytes +
8154 get_stat64(&hw_stats->tx_octets);
8156 stats->rx_errors = old_stats->rx_errors +
8157 get_stat64(&hw_stats->rx_errors);
8158 stats->tx_errors = old_stats->tx_errors +
8159 get_stat64(&hw_stats->tx_errors) +
8160 get_stat64(&hw_stats->tx_mac_errors) +
8161 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8162 get_stat64(&hw_stats->tx_discards);
8164 stats->multicast = old_stats->multicast +
8165 get_stat64(&hw_stats->rx_mcast_packets);
8166 stats->collisions = old_stats->collisions +
8167 get_stat64(&hw_stats->tx_collisions);
8169 stats->rx_length_errors = old_stats->rx_length_errors +
8170 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8171 get_stat64(&hw_stats->rx_undersize_packets);
8173 stats->rx_over_errors = old_stats->rx_over_errors +
8174 get_stat64(&hw_stats->rxbds_empty);
8175 stats->rx_frame_errors = old_stats->rx_frame_errors +
8176 get_stat64(&hw_stats->rx_align_errors);
8177 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8178 get_stat64(&hw_stats->tx_discards);
8179 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8180 get_stat64(&hw_stats->tx_carrier_sense_errors);
8182 stats->rx_crc_errors = old_stats->rx_crc_errors +
8183 calc_crc_errors(tp);
8185 stats->rx_missed_errors = old_stats->rx_missed_errors +
8186 get_stat64(&hw_stats->rx_discards);
8188 return stats;
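/* Bit-reflected CRC-32 (polynomial 0xedb88320, the IEEE 802.3 CRC)
 * computed LSB-first, one byte at a time.  __tg3_set_rx_mode() below
 * uses the low 7 bits of the complemented CRC of each multicast address
 * to select one of the 128 hash-filter bits spread across
 * MAC_HASH_REG_0..3 (bits 5-6 pick the register, bits 0-4 the bit).
 */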
8191 static inline u32 calc_crc(unsigned char *buf, int len)
8193 u32 reg;
8194 u32 tmp;
8195 int j, k;
8197 reg = 0xffffffff;
8199 for (j = 0; j < len; j++) {
8200 reg ^= buf[j];
8202 for (k = 0; k < 8; k++) {
8203 tmp = reg & 0x01;
8205 reg >>= 1;
8207 if (tmp) {
8208 reg ^= 0xedb88320;
8213 return ~reg;
8216 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8218 /* accept or reject all multicast frames */
8219 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8220 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8221 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8222 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8225 static void __tg3_set_rx_mode(struct net_device *dev)
8227 struct tg3 *tp = netdev_priv(dev);
8228 u32 rx_mode;
8230 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8231 RX_MODE_KEEP_VLAN_TAG);
8233 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8234 * flag clear.
8235 */
8236 #if TG3_VLAN_TAG_USED
8237 if (!tp->vlgrp &&
8238 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8239 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8240 #else
8241 /* By definition, VLAN is always disabled in this
8242 * case.
8243 */
8244 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8245 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8246 #endif
8248 if (dev->flags & IFF_PROMISC) {
8249 /* Promiscuous mode. */
8250 rx_mode |= RX_MODE_PROMISC;
8251 } else if (dev->flags & IFF_ALLMULTI) {
8252 /* Accept all multicast. */
8253 tg3_set_multi (tp, 1);
8254 } else if (dev->mc_count < 1) {
8255 /* Reject all multicast. */
8256 tg3_set_multi (tp, 0);
8257 } else {
8258 /* Accept one or more multicast(s). */
8259 struct dev_mc_list *mclist;
8260 unsigned int i;
8261 u32 mc_filter[4] = { 0, };
8262 u32 regidx;
8263 u32 bit;
8264 u32 crc;
8266 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8267 i++, mclist = mclist->next) {
8269 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
8270 bit = ~crc & 0x7f;
8271 regidx = (bit & 0x60) >> 5;
8272 bit &= 0x1f;
8273 mc_filter[regidx] |= (1 << bit);
8276 tw32(MAC_HASH_REG_0, mc_filter[0]);
8277 tw32(MAC_HASH_REG_1, mc_filter[1]);
8278 tw32(MAC_HASH_REG_2, mc_filter[2]);
8279 tw32(MAC_HASH_REG_3, mc_filter[3]);
8282 if (rx_mode != tp->rx_mode) {
8283 tp->rx_mode = rx_mode;
8284 tw32_f(MAC_RX_MODE, rx_mode);
8285 udelay(10);
8289 static void tg3_set_rx_mode(struct net_device *dev)
8291 struct tg3 *tp = netdev_priv(dev);
8293 if (!netif_running(dev))
8294 return;
8296 tg3_full_lock(tp, 0);
8297 __tg3_set_rx_mode(dev);
8298 tg3_full_unlock(tp);
8301 #define TG3_REGDUMP_LEN (32 * 1024)
8303 static int tg3_get_regs_len(struct net_device *dev)
8305 return TG3_REGDUMP_LEN;
8308 static void tg3_get_regs(struct net_device *dev,
8309 struct ethtool_regs *regs, void *_p)
8311 u32 *p = _p;
8312 struct tg3 *tp = netdev_priv(dev);
8313 u8 *orig_p = _p;
8314 int i;
8316 regs->version = 0;
8318 memset(p, 0, TG3_REGDUMP_LEN);
8320 if (tp->link_config.phy_is_low_power)
8321 return;
8323 tg3_full_lock(tp, 0);
8325 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
8326 #define GET_REG32_LOOP(base,len) \
8327 do { p = (u32 *)(orig_p + (base)); \
8328 for (i = 0; i < len; i += 4) \
8329 __GET_REG32((base) + i); \
8330 } while (0)
8331 #define GET_REG32_1(reg) \
8332 do { p = (u32 *)(orig_p + (reg)); \
8333 __GET_REG32((reg)); \
8334 } while (0)
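/* Each register is copied to the offset it occupies in the chip's
 * register space, so the 32 KB dump is a sparse, offset-addressable
 * image; regions that are skipped stay zeroed by the memset() above.
 */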
8336 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8337 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8338 GET_REG32_LOOP(MAC_MODE, 0x4f0);
8339 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8340 GET_REG32_1(SNDDATAC_MODE);
8341 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8342 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8343 GET_REG32_1(SNDBDC_MODE);
8344 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8345 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8346 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8347 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8348 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8349 GET_REG32_1(RCVDCC_MODE);
8350 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8351 GET_REG32_LOOP(RCVCC_MODE, 0x14);
8352 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8353 GET_REG32_1(MBFREE_MODE);
8354 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8355 GET_REG32_LOOP(MEMARB_MODE, 0x10);
8356 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8357 GET_REG32_LOOP(RDMAC_MODE, 0x08);
8358 GET_REG32_LOOP(WDMAC_MODE, 0x08);
8359 GET_REG32_1(RX_CPU_MODE);
8360 GET_REG32_1(RX_CPU_STATE);
8361 GET_REG32_1(RX_CPU_PGMCTR);
8362 GET_REG32_1(RX_CPU_HWBKPT);
8363 GET_REG32_1(TX_CPU_MODE);
8364 GET_REG32_1(TX_CPU_STATE);
8365 GET_REG32_1(TX_CPU_PGMCTR);
8366 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8367 GET_REG32_LOOP(FTQ_RESET, 0x120);
8368 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8369 GET_REG32_1(DMAC_MODE);
8370 GET_REG32_LOOP(GRC_MODE, 0x4c);
8371 if (tp->tg3_flags & TG3_FLAG_NVRAM)
8372 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8374 #undef __GET_REG32
8375 #undef GET_REG32_LOOP
8376 #undef GET_REG32_1
8378 tg3_full_unlock(tp);
8381 static int tg3_get_eeprom_len(struct net_device *dev)
8383 struct tg3 *tp = netdev_priv(dev);
8385 return tp->nvram_size;
8388 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8389 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
8390 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8392 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8394 struct tg3 *tp = netdev_priv(dev);
8395 int ret;
8396 u8 *pd;
8397 u32 i, offset, len, b_offset, b_count;
8398 __le32 val;
8400 if (tp->link_config.phy_is_low_power)
8401 return -EAGAIN;
8403 offset = eeprom->offset;
8404 len = eeprom->len;
8405 eeprom->len = 0;
8407 eeprom->magic = TG3_EEPROM_MAGIC;
8409 if (offset & 3) {
8410 /* adjustments to start on required 4 byte boundary */
8411 b_offset = offset & 3;
8412 b_count = 4 - b_offset;
8413 if (b_count > len) {
8414 /* i.e. offset=1 len=2 */
8415 b_count = len;
8417 ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
8418 if (ret)
8419 return ret;
8420 memcpy(data, ((char*)&val) + b_offset, b_count);
8421 len -= b_count;
8422 offset += b_count;
8423 eeprom->len += b_count;
8426 /* read bytes up to the last 4 byte boundary */
8427 pd = &data[eeprom->len];
8428 for (i = 0; i < (len - (len & 3)); i += 4) {
8429 ret = tg3_nvram_read_le(tp, offset + i, &val);
8430 if (ret) {
8431 eeprom->len += i;
8432 return ret;
8434 memcpy(pd + i, &val, 4);
8436 eeprom->len += i;
8438 if (len & 3) {
8439 /* read last bytes not ending on 4 byte boundary */
8440 pd = &data[eeprom->len];
8441 b_count = len & 3;
8442 b_offset = offset + len - b_count;
8443 ret = tg3_nvram_read_le(tp, b_offset, &val);
8444 if (ret)
8445 return ret;
8446 memcpy(pd, &val, b_count);
8447 eeprom->len += b_count;
8449 return 0;
8452 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
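/* NVRAM is written in whole 32-bit words, so an unaligned request is
 * handled read-modify-write style: the words bounding the range are
 * read first, the user data is merged into a word-aligned scratch
 * buffer, and the padded range is written back in one block.
 */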
8454 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8456 struct tg3 *tp = netdev_priv(dev);
8457 int ret;
8458 u32 offset, len, b_offset, odd_len;
8459 u8 *buf;
8460 __le32 start, end;
8462 if (tp->link_config.phy_is_low_power)
8463 return -EAGAIN;
8465 if (eeprom->magic != TG3_EEPROM_MAGIC)
8466 return -EINVAL;
8468 offset = eeprom->offset;
8469 len = eeprom->len;
8471 if ((b_offset = (offset & 3))) {
8472 /* adjustments to start on required 4 byte boundary */
8473 ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
8474 if (ret)
8475 return ret;
8476 len += b_offset;
8477 offset &= ~3;
8478 if (len < 4)
8479 len = 4;
8482 odd_len = 0;
8483 if (len & 3) {
8484 /* adjustments to end on required 4 byte boundary */
8485 odd_len = 1;
8486 len = (len + 3) & ~3;
8487 ret = tg3_nvram_read_le(tp, offset+len-4, &end);
8488 if (ret)
8489 return ret;
8492 buf = data;
8493 if (b_offset || odd_len) {
8494 buf = kmalloc(len, GFP_KERNEL);
8495 if (!buf)
8496 return -ENOMEM;
8497 if (b_offset)
8498 memcpy(buf, &start, 4);
8499 if (odd_len)
8500 memcpy(buf+len-4, &end, 4);
8501 memcpy(buf + b_offset, data, eeprom->len);
8504 ret = tg3_nvram_write_block(tp, offset, len, buf);
8506 if (buf != data)
8507 kfree(buf);
8509 return ret;
8512 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8514 struct tg3 *tp = netdev_priv(dev);
8516 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8517 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8518 return -EAGAIN;
8519 return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
8522 cmd->supported = (SUPPORTED_Autoneg);
8524 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8525 cmd->supported |= (SUPPORTED_1000baseT_Half |
8526 SUPPORTED_1000baseT_Full);
8528 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
8529 cmd->supported |= (SUPPORTED_100baseT_Half |
8530 SUPPORTED_100baseT_Full |
8531 SUPPORTED_10baseT_Half |
8532 SUPPORTED_10baseT_Full |
8533 SUPPORTED_TP);
8534 cmd->port = PORT_TP;
8535 } else {
8536 cmd->supported |= SUPPORTED_FIBRE;
8537 cmd->port = PORT_FIBRE;
8540 cmd->advertising = tp->link_config.advertising;
8541 if (netif_running(dev)) {
8542 cmd->speed = tp->link_config.active_speed;
8543 cmd->duplex = tp->link_config.active_duplex;
8545 cmd->phy_address = PHY_ADDR;
8546 cmd->transceiver = 0;
8547 cmd->autoneg = tp->link_config.autoneg;
8548 cmd->maxtxpkt = 0;
8549 cmd->maxrxpkt = 0;
8550 return 0;
8553 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8555 struct tg3 *tp = netdev_priv(dev);
8557 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8558 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8559 return -EAGAIN;
8560 return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
8563 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8564 /* These are the only valid advertisement bits allowed. */
8565 if (cmd->autoneg == AUTONEG_ENABLE &&
8566 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8567 ADVERTISED_1000baseT_Full |
8568 ADVERTISED_Autoneg |
8569 ADVERTISED_FIBRE)))
8570 return -EINVAL;
8571 /* Fiber can only do SPEED_1000. */
8572 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8573 (cmd->speed != SPEED_1000))
8574 return -EINVAL;
8575 /* Copper cannot force SPEED_1000. */
8576 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8577 (cmd->speed == SPEED_1000))
8578 return -EINVAL;
8579 else if ((cmd->speed == SPEED_1000) &&
8580 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8581 return -EINVAL;
8583 tg3_full_lock(tp, 0);
8585 tp->link_config.autoneg = cmd->autoneg;
8586 if (cmd->autoneg == AUTONEG_ENABLE) {
8587 tp->link_config.advertising = (cmd->advertising |
8588 ADVERTISED_Autoneg);
8589 tp->link_config.speed = SPEED_INVALID;
8590 tp->link_config.duplex = DUPLEX_INVALID;
8591 } else {
8592 tp->link_config.advertising = 0;
8593 tp->link_config.speed = cmd->speed;
8594 tp->link_config.duplex = cmd->duplex;
8597 tp->link_config.orig_speed = tp->link_config.speed;
8598 tp->link_config.orig_duplex = tp->link_config.duplex;
8599 tp->link_config.orig_autoneg = tp->link_config.autoneg;
8601 if (netif_running(dev))
8602 tg3_setup_phy(tp, 1);
8604 tg3_full_unlock(tp);
8606 return 0;
8609 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8611 struct tg3 *tp = netdev_priv(dev);
8613 strcpy(info->driver, DRV_MODULE_NAME);
8614 strcpy(info->version, DRV_MODULE_VERSION);
8615 strcpy(info->fw_version, tp->fw_ver);
8616 strcpy(info->bus_info, pci_name(tp->pdev));
8619 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8621 struct tg3 *tp = netdev_priv(dev);
8623 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
8624 device_can_wakeup(&tp->pdev->dev))
8625 wol->supported = WAKE_MAGIC;
8626 else
8627 wol->supported = 0;
8628 wol->wolopts = 0;
8629 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
8630 device_can_wakeup(&tp->pdev->dev))
8631 wol->wolopts = WAKE_MAGIC;
8632 memset(&wol->sopass, 0, sizeof(wol->sopass));
8635 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8637 struct tg3 *tp = netdev_priv(dev);
8638 struct device *dp = &tp->pdev->dev;
8640 if (wol->wolopts & ~WAKE_MAGIC)
8641 return -EINVAL;
8642 if ((wol->wolopts & WAKE_MAGIC) &&
8643 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
8644 return -EINVAL;
8646 spin_lock_bh(&tp->lock);
8647 if (wol->wolopts & WAKE_MAGIC) {
8648 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8649 device_set_wakeup_enable(dp, true);
8650 } else {
8651 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8652 device_set_wakeup_enable(dp, false);
8654 spin_unlock_bh(&tp->lock);
8656 return 0;
8659 static u32 tg3_get_msglevel(struct net_device *dev)
8661 struct tg3 *tp = netdev_priv(dev);
8662 return tp->msg_enable;
8665 static void tg3_set_msglevel(struct net_device *dev, u32 value)
8667 struct tg3 *tp = netdev_priv(dev);
8668 tp->msg_enable = value;
8671 static int tg3_set_tso(struct net_device *dev, u32 value)
8673 struct tg3 *tp = netdev_priv(dev);
8675 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8676 if (value)
8677 return -EINVAL;
8678 return 0;
8680 if ((dev->features & NETIF_F_IPV6_CSUM) &&
8681 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)) {
8682 if (value) {
8683 dev->features |= NETIF_F_TSO6;
8684 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8685 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
8686 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
8687 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8688 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8689 dev->features |= NETIF_F_TSO_ECN;
8690 } else
8691 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
8693 return ethtool_op_set_tso(dev, value);
8696 static int tg3_nway_reset(struct net_device *dev)
8698 struct tg3 *tp = netdev_priv(dev);
8699 int r;
8701 if (!netif_running(dev))
8702 return -EAGAIN;
8704 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8705 return -EINVAL;
8707 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8708 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8709 return -EAGAIN;
8710 r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
8711 } else {
8712 u32 bmcr;
8714 spin_lock_bh(&tp->lock);
8715 r = -EINVAL;
8716 tg3_readphy(tp, MII_BMCR, &bmcr);
8717 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
8718 ((bmcr & BMCR_ANENABLE) ||
8719 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
8720 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
8721 BMCR_ANENABLE);
8722 r = 0;
8724 spin_unlock_bh(&tp->lock);
8727 return r;
8730 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8732 struct tg3 *tp = netdev_priv(dev);
8734 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8735 ering->rx_mini_max_pending = 0;
8736 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8737 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8738 else
8739 ering->rx_jumbo_max_pending = 0;
8741 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8743 ering->rx_pending = tp->rx_pending;
8744 ering->rx_mini_pending = 0;
8745 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8746 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8747 else
8748 ering->rx_jumbo_pending = 0;
8750 ering->tx_pending = tp->tx_pending;
8753 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8755 struct tg3 *tp = netdev_priv(dev);
8756 int irq_sync = 0, err = 0;
8758 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
8759 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
8760 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
8761 (ering->tx_pending <= MAX_SKB_FRAGS) ||
8762 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
8763 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
8764 return -EINVAL;
8766 if (netif_running(dev)) {
8767 tg3_phy_stop(tp);
8768 tg3_netif_stop(tp);
8769 irq_sync = 1;
8772 tg3_full_lock(tp, irq_sync);
8774 tp->rx_pending = ering->rx_pending;
8776 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
8777 tp->rx_pending > 63)
8778 tp->rx_pending = 63;
8779 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
8780 tp->tx_pending = ering->tx_pending;
8782 if (netif_running(dev)) {
8783 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8784 err = tg3_restart_hw(tp, 1);
8785 if (!err)
8786 tg3_netif_start(tp);
8789 tg3_full_unlock(tp);
8791 if (irq_sync && !err)
8792 tg3_phy_start(tp);
8794 return err;
8797 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8799 struct tg3 *tp = netdev_priv(dev);
8801 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8803 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
8804 epause->rx_pause = 1;
8805 else
8806 epause->rx_pause = 0;
8808 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
8809 epause->tx_pause = 1;
8810 else
8811 epause->tx_pause = 0;
8814 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8816 struct tg3 *tp = netdev_priv(dev);
8817 int err = 0;
8819 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8820 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8821 return -EAGAIN;
8823 if (epause->autoneg) {
8824 u32 newadv;
8825 struct phy_device *phydev;
8827 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
8829 if (epause->rx_pause) {
8830 if (epause->tx_pause)
8831 newadv = ADVERTISED_Pause;
8832 else
8833 newadv = ADVERTISED_Pause |
8834 ADVERTISED_Asym_Pause;
8835 } else if (epause->tx_pause) {
8836 newadv = ADVERTISED_Asym_Pause;
8837 } else
8838 newadv = 0;
8840 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
8841 u32 oldadv = phydev->advertising &
8842 (ADVERTISED_Pause |
8843 ADVERTISED_Asym_Pause);
8844 if (oldadv != newadv) {
8845 phydev->advertising &=
8846 ~(ADVERTISED_Pause |
8847 ADVERTISED_Asym_Pause);
8848 phydev->advertising |= newadv;
8849 err = phy_start_aneg(phydev);
8851 } else {
8852 tp->link_config.advertising &=
8853 ~(ADVERTISED_Pause |
8854 ADVERTISED_Asym_Pause);
8855 tp->link_config.advertising |= newadv;
8857 } else {
8858 if (epause->rx_pause)
8859 tp->link_config.flowctrl |= FLOW_CTRL_RX;
8860 else
8861 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
8863 if (epause->tx_pause)
8864 tp->link_config.flowctrl |= FLOW_CTRL_TX;
8865 else
8866 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
8868 if (netif_running(dev))
8869 tg3_setup_flow_control(tp, 0, 0);
8871 } else {
8872 int irq_sync = 0;
8874 if (netif_running(dev)) {
8875 tg3_netif_stop(tp);
8876 irq_sync = 1;
8879 tg3_full_lock(tp, irq_sync);
8881 if (epause->autoneg)
8882 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8883 else
8884 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8885 if (epause->rx_pause)
8886 tp->link_config.flowctrl |= FLOW_CTRL_RX;
8887 else
8888 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
8889 if (epause->tx_pause)
8890 tp->link_config.flowctrl |= FLOW_CTRL_TX;
8891 else
8892 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
8894 if (netif_running(dev)) {
8895 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8896 err = tg3_restart_hw(tp, 1);
8897 if (!err)
8898 tg3_netif_start(tp);
8901 tg3_full_unlock(tp);
8904 return err;
8907 static u32 tg3_get_rx_csum(struct net_device *dev)
8909 struct tg3 *tp = netdev_priv(dev);
8910 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8913 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8915 struct tg3 *tp = netdev_priv(dev);
8917 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8918 if (data != 0)
8919 return -EINVAL;
8920 return 0;
8923 spin_lock_bh(&tp->lock);
8924 if (data)
8925 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8926 else
8927 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8928 spin_unlock_bh(&tp->lock);
8930 return 0;
8933 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8935 struct tg3 *tp = netdev_priv(dev);
8937 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8938 if (data != 0)
8939 return -EINVAL;
8940 return 0;
8943 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8944 ethtool_op_set_tx_ipv6_csum(dev, data);
8945 else
8946 ethtool_op_set_tx_csum(dev, data);
8948 return 0;
8951 static int tg3_get_sset_count (struct net_device *dev, int sset)
8953 switch (sset) {
8954 case ETH_SS_TEST:
8955 return TG3_NUM_TEST;
8956 case ETH_SS_STATS:
8957 return TG3_NUM_STATS;
8958 default:
8959 return -EOPNOTSUPP;
8963 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8965 switch (stringset) {
8966 case ETH_SS_STATS:
8967 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8968 break;
8969 case ETH_SS_TEST:
8970 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8971 break;
8972 default:
8973 WARN_ON(1); /* we need a WARN() */
8974 break;
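/* ethtool -p handler: force the link/traffic LEDs on and off in 500 ms
 * steps for roughly 'data' seconds (0 means blink until interrupted),
 * then restore the original tp->led_ctrl value.
 */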
8978 static int tg3_phys_id(struct net_device *dev, u32 data)
8980 struct tg3 *tp = netdev_priv(dev);
8981 int i;
8983 if (!netif_running(tp->dev))
8984 return -EAGAIN;
8986 if (data == 0)
8987 data = UINT_MAX / 2;
8989 for (i = 0; i < (data * 2); i++) {
8990 if ((i % 2) == 0)
8991 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8992 LED_CTRL_1000MBPS_ON |
8993 LED_CTRL_100MBPS_ON |
8994 LED_CTRL_10MBPS_ON |
8995 LED_CTRL_TRAFFIC_OVERRIDE |
8996 LED_CTRL_TRAFFIC_BLINK |
8997 LED_CTRL_TRAFFIC_LED);
8999 else
9000 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9001 LED_CTRL_TRAFFIC_OVERRIDE);
9003 if (msleep_interruptible(500))
9004 break;
9006 tw32(MAC_LED_CTRL, tp->led_ctrl);
9007 return 0;
9010 static void tg3_get_ethtool_stats (struct net_device *dev,
9011 struct ethtool_stats *estats, u64 *tmp_stats)
9013 struct tg3 *tp = netdev_priv(dev);
9014 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
9017 #define NVRAM_TEST_SIZE 0x100
9018 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
9019 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
9020 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
9021 #define NVRAM_SELFBOOT_HW_SIZE 0x20
9022 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
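/* tg3_test_nvram() understands three image layouts: the legacy format
 * (CRC-32 of the bootstrap header checked at 0x10 and of the
 * manufacturing block at 0xfc), selfboot format 1 (simple byte
 * checksum, skipping the MBA word on revision 2), and the hardware
 * selfboot format (a parity bit per data byte).
 */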
9024 static int tg3_test_nvram(struct tg3 *tp)
9026 u32 csum, magic;
9027 __le32 *buf;
9028 int i, j, k, err = 0, size;
9030 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9031 return -EIO;
9033 if (magic == TG3_EEPROM_MAGIC)
9034 size = NVRAM_TEST_SIZE;
9035 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
9036 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
9037 TG3_EEPROM_SB_FORMAT_1) {
9038 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
9039 case TG3_EEPROM_SB_REVISION_0:
9040 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
9041 break;
9042 case TG3_EEPROM_SB_REVISION_2:
9043 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
9044 break;
9045 case TG3_EEPROM_SB_REVISION_3:
9046 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
9047 break;
9048 default:
9049 return 0;
9051 } else
9052 return 0;
9053 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
9054 size = NVRAM_SELFBOOT_HW_SIZE;
9055 else
9056 return -EIO;
9058 buf = kmalloc(size, GFP_KERNEL);
9059 if (buf == NULL)
9060 return -ENOMEM;
9062 err = -EIO;
9063 for (i = 0, j = 0; i < size; i += 4, j++) {
9064 if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
9065 break;
9067 if (i < size)
9068 goto out;
9070 /* Selfboot format */
9071 magic = swab32(le32_to_cpu(buf[0]));
9072 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
9073 TG3_EEPROM_MAGIC_FW) {
9074 u8 *buf8 = (u8 *) buf, csum8 = 0;
9076 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
9077 TG3_EEPROM_SB_REVISION_2) {
9078 /* For rev 2, the csum doesn't include the MBA. */
9079 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
9080 csum8 += buf8[i];
9081 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
9082 csum8 += buf8[i];
9083 } else {
9084 for (i = 0; i < size; i++)
9085 csum8 += buf8[i];
9088 if (csum8 == 0) {
9089 err = 0;
9090 goto out;
9093 err = -EIO;
9094 goto out;
9097 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
9098 TG3_EEPROM_MAGIC_HW) {
9099 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9100 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9101 u8 *buf8 = (u8 *) buf;
9103 /* Separate the parity bits and the data bytes. */
9104 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
9105 if ((i == 0) || (i == 8)) {
9106 int l;
9107 u8 msk;
9109 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
9110 parity[k++] = buf8[i] & msk;
9111 i++;
9113 else if (i == 16) {
9114 int l;
9115 u8 msk;
9117 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9118 parity[k++] = buf8[i] & msk;
9119 i++;
9121 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9122 parity[k++] = buf8[i] & msk;
9123 i++;
9125 data[j++] = buf8[i];
9128 err = -EIO;
9129 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9130 u8 hw8 = hweight8(data[i]);
9132 if ((hw8 & 0x1) && parity[i])
9133 goto out;
9134 else if (!(hw8 & 0x1) && !parity[i])
9135 goto out;
9137 err = 0;
9138 goto out;
9141 /* Bootstrap checksum at offset 0x10 */
9142 csum = calc_crc((unsigned char *) buf, 0x10);
9143 if (csum != le32_to_cpu(buf[0x10/4]))
9144 goto out;
9146 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9147 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
9148 if (csum != le32_to_cpu(buf[0xfc/4]))
9149 goto out;
9151 err = 0;
9153 out:
9154 kfree(buf);
9155 return err;
9158 #define TG3_SERDES_TIMEOUT_SEC 2
9159 #define TG3_COPPER_TIMEOUT_SEC 6
9161 static int tg3_test_link(struct tg3 *tp)
9163 int i, max;
9165 if (!netif_running(tp->dev))
9166 return -ENODEV;
9168 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9169 max = TG3_SERDES_TIMEOUT_SEC;
9170 else
9171 max = TG3_COPPER_TIMEOUT_SEC;
9173 for (i = 0; i < max; i++) {
9174 if (netif_carrier_ok(tp->dev))
9175 return 0;
9177 if (msleep_interruptible(1000))
9178 break;
9181 return -EIO;
9184 /* Only test the commonly used registers */
9185 static int tg3_test_registers(struct tg3 *tp)
9187 int i, is_5705, is_5750;
9188 u32 offset, read_mask, write_mask, val, save_val, read_val;
9189 static struct {
9190 u16 offset;
9191 u16 flags;
9192 #define TG3_FL_5705 0x1
9193 #define TG3_FL_NOT_5705 0x2
9194 #define TG3_FL_NOT_5788 0x4
9195 #define TG3_FL_NOT_5750 0x8
9196 u32 read_mask;
9197 u32 write_mask;
9198 } reg_tbl[] = {
9199 /* MAC Control Registers */
9200 { MAC_MODE, TG3_FL_NOT_5705,
9201 0x00000000, 0x00ef6f8c },
9202 { MAC_MODE, TG3_FL_5705,
9203 0x00000000, 0x01ef6b8c },
9204 { MAC_STATUS, TG3_FL_NOT_5705,
9205 0x03800107, 0x00000000 },
9206 { MAC_STATUS, TG3_FL_5705,
9207 0x03800100, 0x00000000 },
9208 { MAC_ADDR_0_HIGH, 0x0000,
9209 0x00000000, 0x0000ffff },
9210 { MAC_ADDR_0_LOW, 0x0000,
9211 0x00000000, 0xffffffff },
9212 { MAC_RX_MTU_SIZE, 0x0000,
9213 0x00000000, 0x0000ffff },
9214 { MAC_TX_MODE, 0x0000,
9215 0x00000000, 0x00000070 },
9216 { MAC_TX_LENGTHS, 0x0000,
9217 0x00000000, 0x00003fff },
9218 { MAC_RX_MODE, TG3_FL_NOT_5705,
9219 0x00000000, 0x000007fc },
9220 { MAC_RX_MODE, TG3_FL_5705,
9221 0x00000000, 0x000007dc },
9222 { MAC_HASH_REG_0, 0x0000,
9223 0x00000000, 0xffffffff },
9224 { MAC_HASH_REG_1, 0x0000,
9225 0x00000000, 0xffffffff },
9226 { MAC_HASH_REG_2, 0x0000,
9227 0x00000000, 0xffffffff },
9228 { MAC_HASH_REG_3, 0x0000,
9229 0x00000000, 0xffffffff },
9231 /* Receive Data and Receive BD Initiator Control Registers. */
9232 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9233 0x00000000, 0xffffffff },
9234 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9235 0x00000000, 0xffffffff },
9236 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9237 0x00000000, 0x00000003 },
9238 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9239 0x00000000, 0xffffffff },
9240 { RCVDBDI_STD_BD+0, 0x0000,
9241 0x00000000, 0xffffffff },
9242 { RCVDBDI_STD_BD+4, 0x0000,
9243 0x00000000, 0xffffffff },
9244 { RCVDBDI_STD_BD+8, 0x0000,
9245 0x00000000, 0xffff0002 },
9246 { RCVDBDI_STD_BD+0xc, 0x0000,
9247 0x00000000, 0xffffffff },
9249 /* Receive BD Initiator Control Registers. */
9250 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9251 0x00000000, 0xffffffff },
9252 { RCVBDI_STD_THRESH, TG3_FL_5705,
9253 0x00000000, 0x000003ff },
9254 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9255 0x00000000, 0xffffffff },
9257 /* Host Coalescing Control Registers. */
9258 { HOSTCC_MODE, TG3_FL_NOT_5705,
9259 0x00000000, 0x00000004 },
9260 { HOSTCC_MODE, TG3_FL_5705,
9261 0x00000000, 0x000000f6 },
9262 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9263 0x00000000, 0xffffffff },
9264 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9265 0x00000000, 0x000003ff },
9266 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9267 0x00000000, 0xffffffff },
9268 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9269 0x00000000, 0x000003ff },
9270 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9271 0x00000000, 0xffffffff },
9272 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9273 0x00000000, 0x000000ff },
9274 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9275 0x00000000, 0xffffffff },
9276 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9277 0x00000000, 0x000000ff },
9278 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9279 0x00000000, 0xffffffff },
9280 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9281 0x00000000, 0xffffffff },
9282 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9283 0x00000000, 0xffffffff },
9284 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9285 0x00000000, 0x000000ff },
9286 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9287 0x00000000, 0xffffffff },
9288 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9289 0x00000000, 0x000000ff },
9290 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9291 0x00000000, 0xffffffff },
9292 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9293 0x00000000, 0xffffffff },
9294 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9295 0x00000000, 0xffffffff },
9296 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9297 0x00000000, 0xffffffff },
9298 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9299 0x00000000, 0xffffffff },
9300 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9301 0xffffffff, 0x00000000 },
9302 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9303 0xffffffff, 0x00000000 },
9305 /* Buffer Manager Control Registers. */
9306 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
9307 0x00000000, 0x007fff80 },
9308 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
9309 0x00000000, 0x007fffff },
9310 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9311 0x00000000, 0x0000003f },
9312 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9313 0x00000000, 0x000001ff },
9314 { BUFMGR_MB_HIGH_WATER, 0x0000,
9315 0x00000000, 0x000001ff },
9316 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9317 0xffffffff, 0x00000000 },
9318 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9319 0xffffffff, 0x00000000 },
9321 /* Mailbox Registers */
9322 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9323 0x00000000, 0x000001ff },
9324 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9325 0x00000000, 0x000001ff },
9326 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9327 0x00000000, 0x000007ff },
9328 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9329 0x00000000, 0x000001ff },
9331 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9334 is_5705 = is_5750 = 0;
9335 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9336 is_5705 = 1;
9337 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9338 is_5750 = 1;
9341 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9342 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9343 continue;
9345 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9346 continue;
9348 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9349 (reg_tbl[i].flags & TG3_FL_NOT_5788))
9350 continue;
9352 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9353 continue;
9355 offset = (u32) reg_tbl[i].offset;
9356 read_mask = reg_tbl[i].read_mask;
9357 write_mask = reg_tbl[i].write_mask;
9359 /* Save the original register content */
9360 save_val = tr32(offset);
9362 /* Determine the read-only value. */
9363 read_val = save_val & read_mask;
9365 /* Write zero to the register, then make sure the read-only bits
9366 * are not changed and the read/write bits are all zeros.
9367 */
9368 tw32(offset, 0);
9370 val = tr32(offset);
9372 /* Test the read-only and read/write bits. */
9373 if (((val & read_mask) != read_val) || (val & write_mask))
9374 goto out;
9376 /* Write ones to all the bits defined by RdMask and WrMask, then
9377 * make sure the read-only bits are not changed and the
9378 * read/write bits are all ones.
9379 */
9380 tw32(offset, read_mask | write_mask);
9382 val = tr32(offset);
9384 /* Test the read-only bits. */
9385 if ((val & read_mask) != read_val)
9386 goto out;
9388 /* Test the read/write bits. */
9389 if ((val & write_mask) != write_mask)
9390 goto out;
9392 tw32(offset, save_val);
9395 return 0;
9397 out:
9398 if (netif_msg_hw(tp))
9399 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9400 offset);
9401 tw32(offset, save_val);
9402 return -EIO;
9405 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9407 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9408 int i;
9409 u32 j;
9411 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9412 for (j = 0; j < len; j += 4) {
9413 u32 val;
9415 tg3_write_mem(tp, offset + j, test_pattern[i]);
9416 tg3_read_mem(tp, offset + j, &val);
9417 if (val != test_pattern[i])
9418 return -EIO;
9421 return 0;
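/* The table of internal SRAM regions to exercise depends on the ASIC
 * generation; tg3_do_mem_test() above walks each region with three fill
 * patterns (all zeros, all ones, 0xaa55a55a) and verifies the readback.
 */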
9424 static int tg3_test_memory(struct tg3 *tp)
9426 static struct mem_entry {
9427 u32 offset;
9428 u32 len;
9429 } mem_tbl_570x[] = {
9430 { 0x00000000, 0x00b50},
9431 { 0x00002000, 0x1c000},
9432 { 0xffffffff, 0x00000}
9433 }, mem_tbl_5705[] = {
9434 { 0x00000100, 0x0000c},
9435 { 0x00000200, 0x00008},
9436 { 0x00004000, 0x00800},
9437 { 0x00006000, 0x01000},
9438 { 0x00008000, 0x02000},
9439 { 0x00010000, 0x0e000},
9440 { 0xffffffff, 0x00000}
9441 }, mem_tbl_5755[] = {
9442 { 0x00000200, 0x00008},
9443 { 0x00004000, 0x00800},
9444 { 0x00006000, 0x00800},
9445 { 0x00008000, 0x02000},
9446 { 0x00010000, 0x0c000},
9447 { 0xffffffff, 0x00000}
9448 }, mem_tbl_5906[] = {
9449 { 0x00000200, 0x00008},
9450 { 0x00004000, 0x00400},
9451 { 0x00006000, 0x00400},
9452 { 0x00008000, 0x01000},
9453 { 0x00010000, 0x01000},
9454 { 0xffffffff, 0x00000}
9456 struct mem_entry *mem_tbl;
9457 int err = 0;
9458 int i;
9460 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
9461 mem_tbl = mem_tbl_5755;
9462 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9463 mem_tbl = mem_tbl_5906;
9464 else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
9465 mem_tbl = mem_tbl_5705;
9466 else
9467 mem_tbl = mem_tbl_570x;
9469 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9470 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9471 mem_tbl[i].len)) != 0)
9472 break;
9475 return err;
9478 #define TG3_MAC_LOOPBACK 0
9479 #define TG3_PHY_LOOPBACK 1
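/* Loopback self-test: build a 1514-byte frame addressed to our own MAC,
 * queue it on the send ring, then poll the status block until the TX
 * consumer and RX producer indices advance.  The frame must come back
 * on the standard RX ring with the same length (minus FCS) and an
 * intact payload pattern.  MAC loopback uses the internal port loopback
 * mode (skipped on 5780 due to a hardware erratum); PHY loopback sets
 * BMCR_LOOPBACK on the transceiver.
 */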
9481 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
9483 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
9484 u32 desc_idx;
9485 struct sk_buff *skb, *rx_skb;
9486 u8 *tx_data;
9487 dma_addr_t map;
9488 int num_pkts, tx_len, rx_len, i, err;
9489 struct tg3_rx_buffer_desc *desc;
9491 if (loopback_mode == TG3_MAC_LOOPBACK) {
9492 /* HW errata - mac loopback fails in some cases on 5780.
9493 * Normal traffic and PHY loopback are not affected by
9494 * errata.
9495 */
9496 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9497 return 0;
9499 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
9500 MAC_MODE_PORT_INT_LPBACK;
9501 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9502 mac_mode |= MAC_MODE_LINK_POLARITY;
9503 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9504 mac_mode |= MAC_MODE_PORT_MODE_MII;
9505 else
9506 mac_mode |= MAC_MODE_PORT_MODE_GMII;
9507 tw32(MAC_MODE, mac_mode);
9508 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
9509 u32 val;
9511 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9512 u32 phytest;
9514 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
9515 u32 phy;
9517 tg3_writephy(tp, MII_TG3_EPHY_TEST,
9518 phytest | MII_TG3_EPHY_SHADOW_EN);
9519 if (!tg3_readphy(tp, 0x1b, &phy))
9520 tg3_writephy(tp, 0x1b, phy & ~0x20);
9521 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
9523 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
9524 } else
9525 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
9527 tg3_phy_toggle_automdix(tp, 0);
9529 tg3_writephy(tp, MII_BMCR, val);
9530 udelay(40);
9532 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
9533 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9534 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
9535 mac_mode |= MAC_MODE_PORT_MODE_MII;
9536 } else
9537 mac_mode |= MAC_MODE_PORT_MODE_GMII;
9539 /* reset to prevent losing 1st rx packet intermittently */
9540 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
9541 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9542 udelay(10);
9543 tw32_f(MAC_RX_MODE, tp->rx_mode);
9545 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
9546 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
9547 mac_mode &= ~MAC_MODE_LINK_POLARITY;
9548 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
9549 mac_mode |= MAC_MODE_LINK_POLARITY;
9550 tg3_writephy(tp, MII_TG3_EXT_CTRL,
9551 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
9553 tw32(MAC_MODE, mac_mode);
9555 else
9556 return -EINVAL;
9558 err = -EIO;
9560 tx_len = 1514;
9561 skb = netdev_alloc_skb(tp->dev, tx_len);
9562 if (!skb)
9563 return -ENOMEM;
9565 tx_data = skb_put(skb, tx_len);
9566 memcpy(tx_data, tp->dev->dev_addr, 6);
9567 memset(tx_data + 6, 0x0, 8);
9569 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
9571 for (i = 14; i < tx_len; i++)
9572 tx_data[i] = (u8) (i & 0xff);
9574 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
9576 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9577 HOSTCC_MODE_NOW);
9579 udelay(10);
9581 rx_start_idx = tp->hw_status->idx[0].rx_producer;
9583 num_pkts = 0;
9585 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
9587 tp->tx_prod++;
9588 num_pkts++;
9590 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
9591 tp->tx_prod);
9592 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
9594 udelay(10);
9596 /* 250 usec to allow enough time on some 10/100 Mbps devices. */
9597 for (i = 0; i < 25; i++) {
9598 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9599 HOSTCC_MODE_NOW);
9601 udelay(10);
9603 tx_idx = tp->hw_status->idx[0].tx_consumer;
9604 rx_idx = tp->hw_status->idx[0].rx_producer;
9605 if ((tx_idx == tp->tx_prod) &&
9606 (rx_idx == (rx_start_idx + num_pkts)))
9607 break;
9610 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
9611 dev_kfree_skb(skb);
9613 if (tx_idx != tp->tx_prod)
9614 goto out;
9616 if (rx_idx != rx_start_idx + num_pkts)
9617 goto out;
9619 desc = &tp->rx_rcb[rx_start_idx];
9620 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
9621 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
9622 if (opaque_key != RXD_OPAQUE_RING_STD)
9623 goto out;
9625 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
9626 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
9627 goto out;
9629 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
9630 if (rx_len != tx_len)
9631 goto out;
9633 rx_skb = tp->rx_std_buffers[desc_idx].skb;
9635 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
9636 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
9638 for (i = 14; i < tx_len; i++) {
9639 if (*(rx_skb->data + i) != (u8) (i & 0xff))
9640 goto out;
9642 err = 0;
9644 /* tg3_free_rings will unmap and free the rx_skb */
9645 out:
9646 return err;
9649 #define TG3_MAC_LOOPBACK_FAILED 1
9650 #define TG3_PHY_LOOPBACK_FAILED 2
9651 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
9652 TG3_PHY_LOOPBACK_FAILED)
9654 static int tg3_test_loopback(struct tg3 *tp)
9656 int err = 0;
9657 u32 cpmuctrl = 0;
9659 if (!netif_running(tp->dev))
9660 return TG3_LOOPBACK_FAILED;
9662 err = tg3_reset_hw(tp, 1);
9663 if (err)
9664 return TG3_LOOPBACK_FAILED;
9666 /* Turn off gphy autopowerdown. */
9667 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
9668 tg3_phy_toggle_apd(tp, false);
9670 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
9671 int i;
9672 u32 status;
9674 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
9676 /* Wait for up to 40 microseconds to acquire lock. */
9677 for (i = 0; i < 4; i++) {
9678 status = tr32(TG3_CPMU_MUTEX_GNT);
9679 if (status == CPMU_MUTEX_GNT_DRIVER)
9680 break;
9681 udelay(10);
9684 if (status != CPMU_MUTEX_GNT_DRIVER)
9685 return TG3_LOOPBACK_FAILED;
9687 /* Turn off link-based power management. */
9688 cpmuctrl = tr32(TG3_CPMU_CTRL);
9689 tw32(TG3_CPMU_CTRL,
9690 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
9691 CPMU_CTRL_LINK_AWARE_MODE));
9694 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
9695 err |= TG3_MAC_LOOPBACK_FAILED;
9697 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
9698 tw32(TG3_CPMU_CTRL, cpmuctrl);
9700 /* Release the mutex */
9701 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
9704 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
9705 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
9706 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
9707 err |= TG3_PHY_LOOPBACK_FAILED;
9710 /* Re-enable gphy autopowerdown. */
9711 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
9712 tg3_phy_toggle_apd(tp, true);
9714 return err;
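/* ethtool self-test entry point.  Result slots: data[0] NVRAM,
 * data[1] link, data[2] registers, data[3] memory, data[4] loopback
 * (a mask of TG3_MAC/PHY_LOOPBACK_FAILED), data[5] interrupt.  The
 * offline tests halt the chip, so the hardware is reinitialized
 * afterwards if the interface is running.
 */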
9717 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
9718 u64 *data)
9720 struct tg3 *tp = netdev_priv(dev);
9722 if (tp->link_config.phy_is_low_power)
9723 tg3_set_power_state(tp, PCI_D0);
9725 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
9727 if (tg3_test_nvram(tp) != 0) {
9728 etest->flags |= ETH_TEST_FL_FAILED;
9729 data[0] = 1;
9731 if (tg3_test_link(tp) != 0) {
9732 etest->flags |= ETH_TEST_FL_FAILED;
9733 data[1] = 1;
9735 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9736 int err, err2 = 0, irq_sync = 0;
9738 if (netif_running(dev)) {
9739 tg3_phy_stop(tp);
9740 tg3_netif_stop(tp);
9741 irq_sync = 1;
9744 tg3_full_lock(tp, irq_sync);
9746 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
9747 err = tg3_nvram_lock(tp);
9748 tg3_halt_cpu(tp, RX_CPU_BASE);
9749 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9750 tg3_halt_cpu(tp, TX_CPU_BASE);
9751 if (!err)
9752 tg3_nvram_unlock(tp);
9754 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
9755 tg3_phy_reset(tp);
9757 if (tg3_test_registers(tp) != 0) {
9758 etest->flags |= ETH_TEST_FL_FAILED;
9759 data[2] = 1;
9761 if (tg3_test_memory(tp) != 0) {
9762 etest->flags |= ETH_TEST_FL_FAILED;
9763 data[3] = 1;
9765 if ((data[4] = tg3_test_loopback(tp)) != 0)
9766 etest->flags |= ETH_TEST_FL_FAILED;
9768 tg3_full_unlock(tp);
9770 if (tg3_test_interrupt(tp) != 0) {
9771 etest->flags |= ETH_TEST_FL_FAILED;
9772 data[5] = 1;
9775 tg3_full_lock(tp, 0);
9777 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9778 if (netif_running(dev)) {
9779 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
9780 err2 = tg3_restart_hw(tp, 1);
9781 if (!err2)
9782 tg3_netif_start(tp);
9785 tg3_full_unlock(tp);
9787 if (irq_sync && !err2)
9788 tg3_phy_start(tp);
9790 if (tp->link_config.phy_is_low_power)
9791 tg3_set_power_state(tp, PCI_D3hot);
9795 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9797 struct mii_ioctl_data *data = if_mii(ifr);
9798 struct tg3 *tp = netdev_priv(dev);
9799 int err;
9801 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9802 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9803 return -EAGAIN;
9804 return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
9807 switch(cmd) {
9808 case SIOCGMIIPHY:
9809 data->phy_id = PHY_ADDR;
9811 /* fallthru */
9812 case SIOCGMIIREG: {
9813 u32 mii_regval;
9815 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9816 break; /* We have no PHY */
9818 if (tp->link_config.phy_is_low_power)
9819 return -EAGAIN;
9821 spin_lock_bh(&tp->lock);
9822 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
9823 spin_unlock_bh(&tp->lock);
9825 data->val_out = mii_regval;
9827 return err;
9830 case SIOCSMIIREG:
9831 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9832 break; /* We have no PHY */
9834 if (!capable(CAP_NET_ADMIN))
9835 return -EPERM;
9837 if (tp->link_config.phy_is_low_power)
9838 return -EAGAIN;
9840 spin_lock_bh(&tp->lock);
9841 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
9842 spin_unlock_bh(&tp->lock);
9844 return err;
9846 default:
9847 /* do nothing */
9848 break;
9850 return -EOPNOTSUPP;
9853 #if TG3_VLAN_TAG_USED
9854 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
9856 struct tg3 *tp = netdev_priv(dev);
9858 if (netif_running(dev))
9859 tg3_netif_stop(tp);
9861 tg3_full_lock(tp, 0);
9863 tp->vlgrp = grp;
9865 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
9866 __tg3_set_rx_mode(dev);
9868 if (netif_running(dev))
9869 tg3_netif_start(tp);
9871 tg3_full_unlock(tp);
9873 #endif
9875 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9877 struct tg3 *tp = netdev_priv(dev);
9879 memcpy(ec, &tp->coal, sizeof(*ec));
9880 return 0;
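/* Validation in tg3_set_coalesce() depends on the chip generation: on
 * 5705 and newer the per-interrupt and statistics-tick limits below are
 * left at zero, so nonzero *_irq or stats_block_coalesce_usecs values
 * are rejected on those chips.
 */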
9883 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9885 struct tg3 *tp = netdev_priv(dev);
9886 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9887 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9889 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9890 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9891 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9892 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9893 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9896 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9897 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9898 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9899 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9900 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9901 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9902 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9903 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9904 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9905 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9906 return -EINVAL;
9908 /* No rx interrupts will be generated if both are zero */
9909 if ((ec->rx_coalesce_usecs == 0) &&
9910 (ec->rx_max_coalesced_frames == 0))
9911 return -EINVAL;
9913 /* No tx interrupts will be generated if both are zero */
9914 if ((ec->tx_coalesce_usecs == 0) &&
9915 (ec->tx_max_coalesced_frames == 0))
9916 return -EINVAL;
9918 /* Only copy relevant parameters, ignore all others. */
9919 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9920 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9921 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9922 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9923 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9924 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9925 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9926 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9927 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9929 if (netif_running(dev)) {
9930 tg3_full_lock(tp, 0);
9931 __tg3_set_coalesce(tp, &tp->coal);
9932 tg3_full_unlock(tp);
9934 return 0;
9937 static const struct ethtool_ops tg3_ethtool_ops = {
9938 .get_settings = tg3_get_settings,
9939 .set_settings = tg3_set_settings,
9940 .get_drvinfo = tg3_get_drvinfo,
9941 .get_regs_len = tg3_get_regs_len,
9942 .get_regs = tg3_get_regs,
9943 .get_wol = tg3_get_wol,
9944 .set_wol = tg3_set_wol,
9945 .get_msglevel = tg3_get_msglevel,
9946 .set_msglevel = tg3_set_msglevel,
9947 .nway_reset = tg3_nway_reset,
9948 .get_link = ethtool_op_get_link,
9949 .get_eeprom_len = tg3_get_eeprom_len,
9950 .get_eeprom = tg3_get_eeprom,
9951 .set_eeprom = tg3_set_eeprom,
9952 .get_ringparam = tg3_get_ringparam,
9953 .set_ringparam = tg3_set_ringparam,
9954 .get_pauseparam = tg3_get_pauseparam,
9955 .set_pauseparam = tg3_set_pauseparam,
9956 .get_rx_csum = tg3_get_rx_csum,
9957 .set_rx_csum = tg3_set_rx_csum,
9958 .set_tx_csum = tg3_set_tx_csum,
9959 .set_sg = ethtool_op_set_sg,
9960 .set_tso = tg3_set_tso,
9961 .self_test = tg3_self_test,
9962 .get_strings = tg3_get_strings,
9963 .phys_id = tg3_phys_id,
9964 .get_ethtool_stats = tg3_get_ethtool_stats,
9965 .get_coalesce = tg3_get_coalesce,
9966 .set_coalesce = tg3_set_coalesce,
9967 .get_sset_count = tg3_get_sset_count,
9970 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9972 u32 cursize, val, magic;
9974 tp->nvram_size = EEPROM_CHIP_SIZE;
9976 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9977 return;
9979 if ((magic != TG3_EEPROM_MAGIC) &&
9980 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9981 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
9982 return;
9984 /*
9985 * Size the chip by reading offsets at increasing powers of two.
9986 * When we encounter our validation signature, we know the addressing
9987 * has wrapped around, and thus have our chip size.
9988 */
9989 cursize = 0x10;
9991 while (cursize < tp->nvram_size) {
9992 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9993 return;
9995 if (val == magic)
9996 break;
9998 cursize <<= 1;
10001 tp->nvram_size = cursize;
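/* With the standard magic value present, the NVRAM size is taken from
 * the word at offset 0xf0 (upper 16 bits, in KB); a zero or unreadable
 * word falls back to assuming a 512 KB part.  Other magic values are
 * sized by the EEPROM wrap-around probe above.
 */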
10004 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10006 u32 val;
10008 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
10009 return;
10011 /* Selfboot format */
10012 if (val != TG3_EEPROM_MAGIC) {
10013 tg3_get_eeprom_size(tp);
10014 return;
10017 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10018 if (val != 0) {
10019 tp->nvram_size = (val >> 16) * 1024;
10020 return;
10023 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10026 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10028 u32 nvcfg1;
10030 nvcfg1 = tr32(NVRAM_CFG1);
10031 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10032 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10034 else {
10035 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10036 tw32(NVRAM_CFG1, nvcfg1);
10039 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
10040 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10041 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10042 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10043 tp->nvram_jedecnum = JEDEC_ATMEL;
10044 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10045 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10046 break;
10047 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10048 tp->nvram_jedecnum = JEDEC_ATMEL;
10049 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10050 break;
10051 case FLASH_VENDOR_ATMEL_EEPROM:
10052 tp->nvram_jedecnum = JEDEC_ATMEL;
10053 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10054 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10055 break;
10056 case FLASH_VENDOR_ST:
10057 tp->nvram_jedecnum = JEDEC_ST;
10058 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10059 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10060 break;
10061 case FLASH_VENDOR_SAIFUN:
10062 tp->nvram_jedecnum = JEDEC_SAIFUN;
10063 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10064 break;
10065 case FLASH_VENDOR_SST_SMALL:
10066 case FLASH_VENDOR_SST_LARGE:
10067 tp->nvram_jedecnum = JEDEC_SST;
10068 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10069 break;
10072 else {
10073 tp->nvram_jedecnum = JEDEC_ATMEL;
10074 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10075 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10079 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10081 u32 nvcfg1;
10083 nvcfg1 = tr32(NVRAM_CFG1);
10085 /* NVRAM protection for TPM */
10086 if (nvcfg1 & (1 << 27))
10087 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10089 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10090 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10091 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10092 tp->nvram_jedecnum = JEDEC_ATMEL;
10093 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10094 break;
10095 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10096 tp->nvram_jedecnum = JEDEC_ATMEL;
10097 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10098 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10099 break;
10100 case FLASH_5752VENDOR_ST_M45PE10:
10101 case FLASH_5752VENDOR_ST_M45PE20:
10102 case FLASH_5752VENDOR_ST_M45PE40:
10103 tp->nvram_jedecnum = JEDEC_ST;
10104 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10105 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10106 break;
10109 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10110 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10111 case FLASH_5752PAGE_SIZE_256:
10112 tp->nvram_pagesize = 256;
10113 break;
10114 case FLASH_5752PAGE_SIZE_512:
10115 tp->nvram_pagesize = 512;
10116 break;
10117 case FLASH_5752PAGE_SIZE_1K:
10118 tp->nvram_pagesize = 1024;
10119 break;
10120 case FLASH_5752PAGE_SIZE_2K:
10121 tp->nvram_pagesize = 2048;
10122 break;
10123 case FLASH_5752PAGE_SIZE_4K:
10124 tp->nvram_pagesize = 4096;
10125 break;
10126 case FLASH_5752PAGE_SIZE_264:
10127 tp->nvram_pagesize = 264;
10128 break;
10131 else {
10132 /* For eeprom, set pagesize to maximum eeprom size */
10133 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10135 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10136 tw32(NVRAM_CFG1, nvcfg1);
10140 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10142 u32 nvcfg1, protect = 0;
10144 nvcfg1 = tr32(NVRAM_CFG1);
10146 /* NVRAM protection for TPM */
10147 if (nvcfg1 & (1 << 27)) {
10148 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10149 protect = 1;
10152 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10153 switch (nvcfg1) {
10154 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10155 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10156 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10157 case FLASH_5755VENDOR_ATMEL_FLASH_5:
10158 tp->nvram_jedecnum = JEDEC_ATMEL;
10159 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10160 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10161 tp->nvram_pagesize = 264;
10162 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10163 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
10164 tp->nvram_size = (protect ? 0x3e200 :
10165 TG3_NVRAM_SIZE_512KB);
10166 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
10167 tp->nvram_size = (protect ? 0x1f200 :
10168 TG3_NVRAM_SIZE_256KB);
10169 else
10170 tp->nvram_size = (protect ? 0x1f200 :
10171 TG3_NVRAM_SIZE_128KB);
10172 break;
10173 case FLASH_5752VENDOR_ST_M45PE10:
10174 case FLASH_5752VENDOR_ST_M45PE20:
10175 case FLASH_5752VENDOR_ST_M45PE40:
10176 tp->nvram_jedecnum = JEDEC_ST;
10177 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10178 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10179 tp->nvram_pagesize = 256;
10180 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
10181 tp->nvram_size = (protect ?
10182 TG3_NVRAM_SIZE_64KB :
10183 TG3_NVRAM_SIZE_128KB);
10184 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
10185 tp->nvram_size = (protect ?
10186 TG3_NVRAM_SIZE_64KB :
10187 TG3_NVRAM_SIZE_256KB);
10188 else
10189 tp->nvram_size = (protect ?
10190 TG3_NVRAM_SIZE_128KB :
10191 TG3_NVRAM_SIZE_512KB);
10192 break;
10196 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10198 u32 nvcfg1;
10200 nvcfg1 = tr32(NVRAM_CFG1);
10202 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10203 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10204 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10205 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10206 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10207 tp->nvram_jedecnum = JEDEC_ATMEL;
10208 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10209 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10211 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10212 tw32(NVRAM_CFG1, nvcfg1);
10213 break;
10214 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10215 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10216 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10217 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10218 tp->nvram_jedecnum = JEDEC_ATMEL;
10219 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10220 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10221 tp->nvram_pagesize = 264;
10222 break;
10223 case FLASH_5752VENDOR_ST_M45PE10:
10224 case FLASH_5752VENDOR_ST_M45PE20:
10225 case FLASH_5752VENDOR_ST_M45PE40:
10226 tp->nvram_jedecnum = JEDEC_ST;
10227 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10228 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10229 tp->nvram_pagesize = 256;
10230 break;
10234 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10236 u32 nvcfg1, protect = 0;
10238 nvcfg1 = tr32(NVRAM_CFG1);
10240 /* NVRAM protection for TPM */
10241 if (nvcfg1 & (1 << 27)) {
10242 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10243 protect = 1;
10246 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10247 switch (nvcfg1) {
10248 case FLASH_5761VENDOR_ATMEL_ADB021D:
10249 case FLASH_5761VENDOR_ATMEL_ADB041D:
10250 case FLASH_5761VENDOR_ATMEL_ADB081D:
10251 case FLASH_5761VENDOR_ATMEL_ADB161D:
10252 case FLASH_5761VENDOR_ATMEL_MDB021D:
10253 case FLASH_5761VENDOR_ATMEL_MDB041D:
10254 case FLASH_5761VENDOR_ATMEL_MDB081D:
10255 case FLASH_5761VENDOR_ATMEL_MDB161D:
10256 tp->nvram_jedecnum = JEDEC_ATMEL;
10257 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10258 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10259 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10260 tp->nvram_pagesize = 256;
10261 break;
10262 case FLASH_5761VENDOR_ST_A_M45PE20:
10263 case FLASH_5761VENDOR_ST_A_M45PE40:
10264 case FLASH_5761VENDOR_ST_A_M45PE80:
10265 case FLASH_5761VENDOR_ST_A_M45PE16:
10266 case FLASH_5761VENDOR_ST_M_M45PE20:
10267 case FLASH_5761VENDOR_ST_M_M45PE40:
10268 case FLASH_5761VENDOR_ST_M_M45PE80:
10269 case FLASH_5761VENDOR_ST_M_M45PE16:
10270 tp->nvram_jedecnum = JEDEC_ST;
10271 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10272 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10273 tp->nvram_pagesize = 256;
10274 break;
10277 if (protect) {
10278 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10279 } else {
10280 switch (nvcfg1) {
10281 case FLASH_5761VENDOR_ATMEL_ADB161D:
10282 case FLASH_5761VENDOR_ATMEL_MDB161D:
10283 case FLASH_5761VENDOR_ST_A_M45PE16:
10284 case FLASH_5761VENDOR_ST_M_M45PE16:
10285 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
10286 break;
10287 case FLASH_5761VENDOR_ATMEL_ADB081D:
10288 case FLASH_5761VENDOR_ATMEL_MDB081D:
10289 case FLASH_5761VENDOR_ST_A_M45PE80:
10290 case FLASH_5761VENDOR_ST_M_M45PE80:
10291 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
10292 break;
10293 case FLASH_5761VENDOR_ATMEL_ADB041D:
10294 case FLASH_5761VENDOR_ATMEL_MDB041D:
10295 case FLASH_5761VENDOR_ST_A_M45PE40:
10296 case FLASH_5761VENDOR_ST_M_M45PE40:
10297 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10298 break;
10299 case FLASH_5761VENDOR_ATMEL_ADB021D:
10300 case FLASH_5761VENDOR_ATMEL_MDB021D:
10301 case FLASH_5761VENDOR_ST_A_M45PE20:
10302 case FLASH_5761VENDOR_ST_M_M45PE20:
10303 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10304 break;
10309 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10311 tp->nvram_jedecnum = JEDEC_ATMEL;
10312 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10313 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10316 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
10318 u32 nvcfg1;
10320 nvcfg1 = tr32(NVRAM_CFG1);
10322 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10323 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10324 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10325 tp->nvram_jedecnum = JEDEC_ATMEL;
10326 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10327 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10329 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10330 tw32(NVRAM_CFG1, nvcfg1);
10331 return;
10332 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10333 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
10334 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
10335 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
10336 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
10337 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
10338 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
10339 tp->nvram_jedecnum = JEDEC_ATMEL;
10340 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10341 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10343 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10344 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10345 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
10346 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
10347 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
10348 break;
10349 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
10350 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
10351 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10352 break;
10353 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
10354 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
10355 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10356 break;
10358 break;
10359 case FLASH_5752VENDOR_ST_M45PE10:
10360 case FLASH_5752VENDOR_ST_M45PE20:
10361 case FLASH_5752VENDOR_ST_M45PE40:
10362 tp->nvram_jedecnum = JEDEC_ST;
10363 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10364 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10366 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10367 case FLASH_5752VENDOR_ST_M45PE10:
10368 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
10369 break;
10370 case FLASH_5752VENDOR_ST_M45PE20:
10371 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10372 break;
10373 case FLASH_5752VENDOR_ST_M45PE40:
10374 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10375 break;
10377 break;
10378 default:
10379 return;
10382 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10383 case FLASH_5752PAGE_SIZE_256:
10384 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10385 tp->nvram_pagesize = 256;
10386 break;
10387 case FLASH_5752PAGE_SIZE_512:
10388 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10389 tp->nvram_pagesize = 512;
10390 break;
10391 case FLASH_5752PAGE_SIZE_1K:
10392 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10393 tp->nvram_pagesize = 1024;
10394 break;
10395 case FLASH_5752PAGE_SIZE_2K:
10396 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10397 tp->nvram_pagesize = 2048;
10398 break;
10399 case FLASH_5752PAGE_SIZE_4K:
10400 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10401 tp->nvram_pagesize = 4096;
10402 break;
10403 case FLASH_5752PAGE_SIZE_264:
10404 tp->nvram_pagesize = 264;
10405 break;
10406 case FLASH_5752PAGE_SIZE_528:
10407 tp->nvram_pagesize = 528;
10408 break;
10412 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10413 static void __devinit tg3_nvram_init(struct tg3 *tp)
10415 tw32_f(GRC_EEPROM_ADDR,
10416 (EEPROM_ADDR_FSM_RESET |
10417 (EEPROM_DEFAULT_CLOCK_PERIOD <<
10418 EEPROM_ADDR_CLKPERD_SHIFT)));
10420 msleep(1);
10422 /* Enable seeprom accesses. */
10423 tw32_f(GRC_LOCAL_CTRL,
10424 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10425 udelay(100);
10427 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10428 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10429 tp->tg3_flags |= TG3_FLAG_NVRAM;
10431 if (tg3_nvram_lock(tp)) {
10432 printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
10433 "tg3_nvram_init failed.\n", tp->dev->name);
10434 return;
10436 tg3_enable_nvram_access(tp);
10438 tp->nvram_size = 0;
10440 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10441 tg3_get_5752_nvram_info(tp);
10442 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10443 tg3_get_5755_nvram_info(tp);
10444 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10445 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10446 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10447 tg3_get_5787_nvram_info(tp);
10448 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10449 tg3_get_5761_nvram_info(tp);
10450 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10451 tg3_get_5906_nvram_info(tp);
10452 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
10453 tg3_get_57780_nvram_info(tp);
10454 else
10455 tg3_get_nvram_info(tp);
10457 if (tp->nvram_size == 0)
10458 tg3_get_nvram_size(tp);
10460 tg3_disable_nvram_access(tp);
10461 tg3_nvram_unlock(tp);
10463 } else {
10464 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10466 tg3_get_eeprom_size(tp);
10470 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10471 u32 offset, u32 *val)
10473 u32 tmp;
10474 int i;
10476 if (offset > EEPROM_ADDR_ADDR_MASK ||
10477 (offset % 4) != 0)
10478 return -EINVAL;
10480 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10481 EEPROM_ADDR_DEVID_MASK |
10482 EEPROM_ADDR_READ);
10483 tw32(GRC_EEPROM_ADDR,
10484 tmp |
10485 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10486 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10487 EEPROM_ADDR_ADDR_MASK) |
10488 EEPROM_ADDR_READ | EEPROM_ADDR_START);
10490 for (i = 0; i < 1000; i++) {
10491 tmp = tr32(GRC_EEPROM_ADDR);
10493 if (tmp & EEPROM_ADDR_COMPLETE)
10494 break;
10495 msleep(1);
10497 if (!(tmp & EEPROM_ADDR_COMPLETE))
10498 return -EBUSY;
10500 *val = tr32(GRC_EEPROM_DATA);
10501 return 0;
10504 #define NVRAM_CMD_TIMEOUT 10000
10506 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10508 int i;
10510 tw32(NVRAM_CMD, nvram_cmd);
10511 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10512 udelay(10);
10513 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10514 udelay(10);
10515 break;
10518 if (i == NVRAM_CMD_TIMEOUT) {
10519 return -EBUSY;
10521 return 0;
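/* Timing note, derived from the constants above: the completion poll is
 * NVRAM_CMD_TIMEOUT (10000) iterations of udelay(10), i.e. roughly
 * 100 ms of busy-waiting in the worst case before -EBUSY is returned.
 */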
10524 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10526 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10527 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10528 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10529 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10530 (tp->nvram_jedecnum == JEDEC_ATMEL))
10532 addr = ((addr / tp->nvram_pagesize) <<
10533 ATMEL_AT45DB0X1B_PAGE_POS) +
10534 (addr % tp->nvram_pagesize);
10536 return addr;
10539 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10541 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10542 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10543 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10544 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10545 (tp->nvram_jedecnum == JEDEC_ATMEL))
10547 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10548 tp->nvram_pagesize) +
10549 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10551 return addr;
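/* Worked example for the two address translations above (illustrative;
 * assumes ATMEL_AT45DB0X1B_PAGE_POS is 9 and nvram_pagesize is 264, as
 * for the buffered Atmel AT45DB parts handled here):
 *
 *	logical 0x21a = page 2, byte 10  ->  phys = (2 << 9) + 10 = 0x40a
 *	phys    0x40a = page 2, byte 10  ->  logical = 2 * 264 + 10 = 0x21a
 *
 * tg3_nvram_phys_addr() packs the page number into the bits above
 * PAGE_POS because the 264-byte pages are not a power of two, and
 * tg3_nvram_logical_addr() undoes that packing.
 */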
10554 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
10556 int ret;
10558 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
10559 return tg3_nvram_read_using_eeprom(tp, offset, val);
10561 offset = tg3_nvram_phys_addr(tp, offset);
10563 if (offset > NVRAM_ADDR_MSK)
10564 return -EINVAL;
10566 ret = tg3_nvram_lock(tp);
10567 if (ret)
10568 return ret;
10570 tg3_enable_nvram_access(tp);
10572 tw32(NVRAM_ADDR, offset);
10573 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
10574 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
10576 if (ret == 0)
10577 *val = swab32(tr32(NVRAM_RDDATA));
10579 tg3_disable_nvram_access(tp);
10581 tg3_nvram_unlock(tp);
10583 return ret;
10586 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10588 u32 v;
10589 int res = tg3_nvram_read(tp, offset, &v);
10590 if (!res)
10591 *val = cpu_to_le32(v);
10592 return res;
10595 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10597 int err;
10598 u32 tmp;
10600 err = tg3_nvram_read(tp, offset, &tmp);
10601 *val = swab32(tmp);
10602 return err;
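/* Relationship between the three read helpers above (illustrative usage
 * sketch, not part of the driver): tg3_nvram_read() returns
 * swab32(NVRAM_RDDATA), tg3_nvram_read_swab() swaps that again and so
 * yields the raw register value, and tg3_nvram_read_le() presents the
 * tg3_nvram_read() result as a __le32.  Callers elsewhere in this file
 * check the image signature with the _swab variant, e.g.:
 *
 *	u32 magic;
 *
 *	if (tg3_nvram_read_swab(tp, 0, &magic) == 0 &&
 *	    magic == TG3_EEPROM_MAGIC)
 *		... image header looks valid ...
 */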
10605 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
10606 u32 offset, u32 len, u8 *buf)
10608 int i, j, rc = 0;
10609 u32 val;
10611 for (i = 0; i < len; i += 4) {
10612 u32 addr;
10613 __le32 data;
10615 addr = offset + i;
10617 memcpy(&data, buf + i, 4);
10619 tw32(GRC_EEPROM_DATA, le32_to_cpu(data));
10621 val = tr32(GRC_EEPROM_ADDR);
10622 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
10624 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
10625 EEPROM_ADDR_READ);
10626 tw32(GRC_EEPROM_ADDR, val |
10627 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10628 (addr & EEPROM_ADDR_ADDR_MASK) |
10629 EEPROM_ADDR_START |
10630 EEPROM_ADDR_WRITE);
10632 for (j = 0; j < 1000; j++) {
10633 val = tr32(GRC_EEPROM_ADDR);
10635 if (val & EEPROM_ADDR_COMPLETE)
10636 break;
10637 msleep(1);
10639 if (!(val & EEPROM_ADDR_COMPLETE)) {
10640 rc = -EBUSY;
10641 break;
10645 return rc;
10648 /* offset and length are dword aligned */
10649 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
10650 u8 *buf)
10652 int ret = 0;
10653 u32 pagesize = tp->nvram_pagesize;
10654 u32 pagemask = pagesize - 1;
10655 u32 nvram_cmd;
10656 u8 *tmp;
10658 tmp = kmalloc(pagesize, GFP_KERNEL);
10659 if (tmp == NULL)
10660 return -ENOMEM;
10662 while (len) {
10663 int j;
10664 u32 phy_addr, page_off, size;
10666 phy_addr = offset & ~pagemask;
10668 for (j = 0; j < pagesize; j += 4) {
10669 if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
10670 (__le32 *) (tmp + j))))
10671 break;
10673 if (ret)
10674 break;
10676 page_off = offset & pagemask;
10677 size = pagesize;
10678 if (len < size)
10679 size = len;
10681 len -= size;
10683 memcpy(tmp + page_off, buf, size);
10685 offset = offset + (pagesize - page_off);
10687 tg3_enable_nvram_access(tp);
10690 * Before we can erase the flash page, we need
10691 * to issue a special "write enable" command.
10693 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10695 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10696 break;
10698 /* Erase the target page */
10699 tw32(NVRAM_ADDR, phy_addr);
10701 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
10702 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
10704 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10705 break;
10707 /* Issue another write enable to start the write. */
10708 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10710 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10711 break;
10713 for (j = 0; j < pagesize; j += 4) {
10714 __be32 data;
10716 data = *((__be32 *) (tmp + j));
10717 /* swab32(le32_to_cpu(data)), actually */
10718 tw32(NVRAM_WRDATA, be32_to_cpu(data));
10720 tw32(NVRAM_ADDR, phy_addr + j);
10722 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
10723 NVRAM_CMD_WR;
10725 if (j == 0)
10726 nvram_cmd |= NVRAM_CMD_FIRST;
10727 else if (j == (pagesize - 4))
10728 nvram_cmd |= NVRAM_CMD_LAST;
10730 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10731 break;
10733 if (ret)
10734 break;
10737 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10738 tg3_nvram_exec_cmd(tp, nvram_cmd);
10740 kfree(tmp);
10742 return ret;
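/* Summary of the unbuffered path above (illustrative): each affected
 * flash page is handled read-modify-write.  The whole page is read into
 * tmp[], the caller's bytes are overlaid at page_off, then the sequence
 * is WREN, page ERASE, WREN again, and finally the page is reprogrammed
 * one dword at a time with NVRAM_CMD_FIRST on the first word and
 * NVRAM_CMD_LAST on the last.  A partial-page write therefore still
 * costs a full page erase and reprogram.
 */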
10745 /* offset and length are dword aligned */
10746 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
10747 u8 *buf)
10749 int i, ret = 0;
10751 for (i = 0; i < len; i += 4, offset += 4) {
10752 u32 page_off, phy_addr, nvram_cmd;
10753 __be32 data;
10755 memcpy(&data, buf + i, 4);
10756 tw32(NVRAM_WRDATA, be32_to_cpu(data));
10758 page_off = offset % tp->nvram_pagesize;
10760 phy_addr = tg3_nvram_phys_addr(tp, offset);
10762 tw32(NVRAM_ADDR, phy_addr);
10764 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
10766 if ((page_off == 0) || (i == 0))
10767 nvram_cmd |= NVRAM_CMD_FIRST;
10768 if (page_off == (tp->nvram_pagesize - 4))
10769 nvram_cmd |= NVRAM_CMD_LAST;
10771 if (i == (len - 4))
10772 nvram_cmd |= NVRAM_CMD_LAST;
10774 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10775 !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
10776 (tp->nvram_jedecnum == JEDEC_ST) &&
10777 (nvram_cmd & NVRAM_CMD_FIRST)) {
10779 if ((ret = tg3_nvram_exec_cmd(tp,
10780 NVRAM_CMD_WREN | NVRAM_CMD_GO |
10781 NVRAM_CMD_DONE)))
10783 break;
10785 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10786 /* We always do complete word writes to eeprom. */
10787 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
10790 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10791 break;
10793 return ret;
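/* Framing note for the buffered path above (illustrative): NVRAM_CMD_FIRST
 * is set at the start of the transfer and at every page boundary
 * (page_off == 0), NVRAM_CMD_LAST at the last word of each page and at
 * the end of the transfer, and on chips other than the 5752 and 5755+
 * families ST parts additionally get an explicit WREN command before
 * each FIRST-framed write.
 */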
10796 /* offset and length are dword aligned */
10797 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
10799 int ret;
10801 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10802 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
10803 ~GRC_LCLCTRL_GPIO_OUTPUT1);
10804 udelay(40);
10807 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
10808 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
10810 else {
10811 u32 grc_mode;
10813 ret = tg3_nvram_lock(tp);
10814 if (ret)
10815 return ret;
10817 tg3_enable_nvram_access(tp);
10818 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
10819 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
10820 tw32(NVRAM_WRITE1, 0x406);
10822 grc_mode = tr32(GRC_MODE);
10823 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
10825 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
10826 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10828 ret = tg3_nvram_write_block_buffered(tp, offset, len,
10829 buf);
10831 else {
10832 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
10833 buf);
10836 grc_mode = tr32(GRC_MODE);
10837 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
10839 tg3_disable_nvram_access(tp);
10840 tg3_nvram_unlock(tp);
10843 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10844 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10845 udelay(40);
10848 return ret;
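/* Usage sketch (illustrative, not from the original sources): callers such
 * as the ethtool eeprom-write path are expected to pass a dword-aligned
 * offset and length, e.g.
 *
 *	u8 buf[4] = { 0x12, 0x34, 0x56, 0x78 };
 *	int err = tg3_nvram_write_block(tp, 0x80, sizeof(buf), buf);
 *
 * The helper drops the GPIO-based eeprom write protect for the duration
 * of the write, takes the NVRAM lock where needed, and picks the
 * buffered or unbuffered programming path.
 */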
10851 struct subsys_tbl_ent {
10852 u16 subsys_vendor, subsys_devid;
10853 u32 phy_id;
10856 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
10857 /* Broadcom boards. */
10858 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
10859 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
10860 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
10861 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
10862 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
10863 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
10864 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
10865 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
10866 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
10867 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
10868 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
10870 /* 3com boards. */
10871 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
10872 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
10873 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
10874 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
10875 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
10877 /* DELL boards. */
10878 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
10879 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
10880 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
10881 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
10883 /* Compaq boards. */
10884 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
10885 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
10886 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
10887 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
10888 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
10890 /* IBM boards. */
10891 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
10894 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10896 int i;
10898 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10899 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10900 tp->pdev->subsystem_vendor) &&
10901 (subsys_id_to_phy_id[i].subsys_devid ==
10902 tp->pdev->subsystem_device))
10903 return &subsys_id_to_phy_id[i];
10905 return NULL;
10908 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10910 u32 val;
10911 u16 pmcsr;
10913 /* On some early chips the SRAM cannot be accessed in D3hot state,
10914 * so we need to make sure we're in D0.
10916 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
10917 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10918 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
10919 msleep(1);
10921 /* Make sure register accesses (indirect or otherwise)
10922 * will function correctly.
10924 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10925 tp->misc_host_ctrl);
10927 /* The memory arbiter has to be enabled in order for SRAM accesses
10928 * to succeed. Normally on powerup the tg3 chip firmware will make
10929 * sure it is enabled, but other entities such as system netboot
10930 * code might disable it.
10932 val = tr32(MEMARB_MODE);
10933 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
10935 tp->phy_id = PHY_ID_INVALID;
10936 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10938 /* Assume an onboard device and WOL capability by default. */
10939 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
10941 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10942 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
10943 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10944 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10946 val = tr32(VCPU_CFGSHDW);
10947 if (val & VCPU_CFGSHDW_ASPM_DBNC)
10948 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10949 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
10950 (val & VCPU_CFGSHDW_WOL_MAGPKT))
10951 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10952 goto done;
10955 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10956 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10957 u32 nic_cfg, led_cfg;
10958 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
10959 int eeprom_phy_serdes = 0;
10961 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
10962 tp->nic_sram_data_cfg = nic_cfg;
10964 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
10965 ver >>= NIC_SRAM_DATA_VER_SHIFT;
10966 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
10967 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
10968 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
10969 (ver > 0) && (ver < 0x100))
10970 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10972 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10973 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
10975 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10976 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10977 eeprom_phy_serdes = 1;
10979 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
10980 if (nic_phy_id != 0) {
10981 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
10982 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
10984 eeprom_phy_id = (id1 >> 16) << 10;
10985 eeprom_phy_id |= (id2 & 0xfc00) << 16;
10986 eeprom_phy_id |= (id2 & 0x03ff) << 0;
10987 } else
10988 eeprom_phy_id = 0;
10990 tp->phy_id = eeprom_phy_id;
10991 if (eeprom_phy_serdes) {
10992 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
10993 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10994 else
10995 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10998 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10999 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
11000 SHASTA_EXT_LED_MODE_MASK);
11001 else
11002 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
11004 switch (led_cfg) {
11005 default:
11006 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
11007 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11008 break;
11010 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
11011 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11012 break;
11014 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
11015 tp->led_ctrl = LED_CTRL_MODE_MAC;
11017 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
11018 * read on some older 5700/5701 bootcode.
11020 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11021 ASIC_REV_5700 ||
11022 GET_ASIC_REV(tp->pci_chip_rev_id) ==
11023 ASIC_REV_5701)
11024 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11026 break;
11028 case SHASTA_EXT_LED_SHARED:
11029 tp->led_ctrl = LED_CTRL_MODE_SHARED;
11030 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
11031 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
11032 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11033 LED_CTRL_MODE_PHY_2);
11034 break;
11036 case SHASTA_EXT_LED_MAC:
11037 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
11038 break;
11040 case SHASTA_EXT_LED_COMBO:
11041 tp->led_ctrl = LED_CTRL_MODE_COMBO;
11042 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
11043 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11044 LED_CTRL_MODE_PHY_2);
11045 break;
11049 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11050 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
11051 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
11052 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11054 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
11055 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11057 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
11058 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
11059 if ((tp->pdev->subsystem_vendor ==
11060 PCI_VENDOR_ID_ARIMA) &&
11061 (tp->pdev->subsystem_device == 0x205a ||
11062 tp->pdev->subsystem_device == 0x2063))
11063 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11064 } else {
11065 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11066 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11069 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
11070 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
11071 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11072 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
11075 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
11076 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11077 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
11079 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
11080 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
11081 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
11083 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
11084 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
11085 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11087 if (cfg2 & (1 << 17))
11088 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
11090 /* Serdes signal pre-emphasis in register 0x590 is set by the
11091 * bootcode if bit 18 is set. */
11092 if (cfg2 & (1 << 18))
11093 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
11095 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
11096 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
11097 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
11098 tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD;
11100 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11101 u32 cfg3;
11103 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
11104 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
11105 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11108 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
11109 tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
11110 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
11111 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
11112 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
11113 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
11115 done:
11116 device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
11117 device_set_wakeup_enable(&tp->pdev->dev,
11118 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
11121 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11123 int i;
11124 u32 val;
11126 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11127 tw32(OTP_CTRL, cmd);
11129 /* Wait for up to 1 ms for command to execute. */
11130 for (i = 0; i < 100; i++) {
11131 val = tr32(OTP_STATUS);
11132 if (val & OTP_STATUS_CMD_DONE)
11133 break;
11134 udelay(10);
11137 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11140 /* Read the gphy configuration from the OTP region of the chip. The gphy
11141 * configuration is a 32-bit value that straddles the alignment boundary.
11142 * We do two 32-bit reads and then shift and merge the results.
11144 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11146 u32 bhalf_otp, thalf_otp;
11148 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11150 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
11151 return 0;
11153 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11155 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11156 return 0;
11158 thalf_otp = tr32(OTP_READ_DATA);
11160 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11162 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11163 return 0;
11165 bhalf_otp = tr32(OTP_READ_DATA);
11167 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
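/* Worked example of the merge above (illustrative): if the two OTP reads
 * return thalf_otp = 0x1234abcd and bhalf_otp = 0x5678ef01, the function
 * returns ((0xabcd) << 16) | 0x5678 = 0xabcd5678 -- the low half of the
 * first word joined to the high half of the second, reassembling the
 * 32-bit gphy configuration that straddles the OTP word boundary.
 */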
11170 static int __devinit tg3_phy_probe(struct tg3 *tp)
11172 u32 hw_phy_id_1, hw_phy_id_2;
11173 u32 hw_phy_id, hw_phy_id_masked;
11174 int err;
11176 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
11177 return tg3_phy_init(tp);
11179 /* Reading the PHY ID register can conflict with ASF
11180 * firmware access to the PHY hardware.
11182 err = 0;
11183 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11184 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
11185 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
11186 } else {
11187 /* Now read the physical PHY_ID from the chip and verify
11188 * that it is sane. If it doesn't look good, we fall back
11189 * to the PHY ID recorded in the eeprom area or, failing
11190 * that, the hard-coded subsystem-ID table.
11192 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
11193 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
11195 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
11196 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
11197 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
11199 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
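/* Worked example of the packing above (illustrative, hypothetical register
 * values): with MII_PHYSID1 = 0x0020 and MII_PHYSID2 = 0x5c01 the code
 * builds
 *
 *	hw_phy_id = (0x0020 << 10)		(OUI bits from PHYSID1)
 *		  | ((0x5c01 & 0xfc00) << 16)	(remaining OUI bits)
 *		  | (0x5c01 & 0x03ff)		(model and revision)
 *		  = 0x5c008001
 *
 * and hw_phy_id_masked then masks off the low revision bits so the value
 * can be compared against the PHY_ID_* constants.
 */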
11202 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
11203 tp->phy_id = hw_phy_id;
11204 if (hw_phy_id_masked == PHY_ID_BCM8002)
11205 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11206 else
11207 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
11208 } else {
11209 if (tp->phy_id != PHY_ID_INVALID) {
11210 /* Do nothing, phy ID already set up in
11211 * tg3_get_eeprom_hw_cfg().
11213 } else {
11214 struct subsys_tbl_ent *p;
11216 /* No eeprom signature? Try the hardcoded
11217 * subsys device table.
11219 p = lookup_by_subsys(tp);
11220 if (!p)
11221 return -ENODEV;
11223 tp->phy_id = p->phy_id;
11224 if (!tp->phy_id ||
11225 tp->phy_id == PHY_ID_BCM8002)
11226 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11230 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
11231 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
11232 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
11233 u32 bmsr, adv_reg, tg3_ctrl, mask;
11235 tg3_readphy(tp, MII_BMSR, &bmsr);
11236 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
11237 (bmsr & BMSR_LSTATUS))
11238 goto skip_phy_reset;
11240 err = tg3_phy_reset(tp);
11241 if (err)
11242 return err;
11244 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
11245 ADVERTISE_100HALF | ADVERTISE_100FULL |
11246 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
11247 tg3_ctrl = 0;
11248 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
11249 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
11250 MII_TG3_CTRL_ADV_1000_FULL);
11251 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11252 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
11253 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
11254 MII_TG3_CTRL_ENABLE_AS_MASTER);
11257 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11258 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11259 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
11260 if (!tg3_copper_is_advertising_all(tp, mask)) {
11261 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11263 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11264 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11266 tg3_writephy(tp, MII_BMCR,
11267 BMCR_ANENABLE | BMCR_ANRESTART);
11269 tg3_phy_set_wirespeed(tp);
11271 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11272 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11273 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11276 skip_phy_reset:
11277 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
11278 err = tg3_init_5401phy_dsp(tp);
11279 if (err)
11280 return err;
11283 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
11284 err = tg3_init_5401phy_dsp(tp);
11287 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
11288 tp->link_config.advertising =
11289 (ADVERTISED_1000baseT_Half |
11290 ADVERTISED_1000baseT_Full |
11291 ADVERTISED_Autoneg |
11292 ADVERTISED_FIBRE);
11293 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
11294 tp->link_config.advertising &=
11295 ~(ADVERTISED_1000baseT_Half |
11296 ADVERTISED_1000baseT_Full);
11298 return err;
11301 static void __devinit tg3_read_partno(struct tg3 *tp)
11303 unsigned char vpd_data[256];
11304 unsigned int i;
11305 u32 magic;
11307 if (tg3_nvram_read_swab(tp, 0x0, &magic))
11308 goto out_not_found;
11310 if (magic == TG3_EEPROM_MAGIC) {
11311 for (i = 0; i < 256; i += 4) {
11312 u32 tmp;
11314 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
11315 goto out_not_found;
11317 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
11318 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
11319 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
11320 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
11322 } else {
11323 int vpd_cap;
11325 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11326 for (i = 0; i < 256; i += 4) {
11327 u32 tmp, j = 0;
11328 __le32 v;
11329 u16 tmp16;
11331 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11333 while (j++ < 100) {
11334 pci_read_config_word(tp->pdev, vpd_cap +
11335 PCI_VPD_ADDR, &tmp16);
11336 if (tmp16 & 0x8000)
11337 break;
11338 msleep(1);
11340 if (!(tmp16 & 0x8000))
11341 goto out_not_found;
11343 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11344 &tmp);
11345 v = cpu_to_le32(tmp);
11346 memcpy(&vpd_data[i], &v, 4);
11350 /* Now parse and find the part number. */
11351 for (i = 0; i < 254; ) {
11352 unsigned char val = vpd_data[i];
11353 unsigned int block_end;
11355 if (val == 0x82 || val == 0x91) {
11356 i = (i + 3 +
11357 (vpd_data[i + 1] +
11358 (vpd_data[i + 2] << 8)));
11359 continue;
11362 if (val != 0x90)
11363 goto out_not_found;
11365 block_end = (i + 3 +
11366 (vpd_data[i + 1] +
11367 (vpd_data[i + 2] << 8)));
11368 i += 3;
11370 if (block_end > 256)
11371 goto out_not_found;
11373 while (i < (block_end - 2)) {
11374 if (vpd_data[i + 0] == 'P' &&
11375 vpd_data[i + 1] == 'N') {
11376 int partno_len = vpd_data[i + 2];
11378 i += 3;
11379 if (partno_len > 24 || (partno_len + i) > 256)
11380 goto out_not_found;
11382 memcpy(tp->board_part_number,
11383 &vpd_data[i], partno_len);
11385 /* Success. */
11386 return;
11388 i += 3 + vpd_data[i + 2];
11391 /* Part number not found. */
11392 goto out_not_found;
11395 out_not_found:
11396 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11397 strcpy(tp->board_part_number, "BCM95906");
11398 else
11399 strcpy(tp->board_part_number, "none");
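/* Layout sketch of the VPD data parsed above (illustrative): the 256-byte
 * buffer holds standard PCI VPD resources.  A 0x82 identifier-string tag
 * (and a 0x91 read-write tag, if present) is skipped, then the 0x90
 * read-only tag is scanned for the two-byte 'P','N' keyword whose payload
 * (at most 24 bytes here) is copied into tp->board_part_number.  For
 * example, a hypothetical resource such as
 *
 *	0x90 0x0b 0x00  'P' 'N' 0x08  'B' 'C' 'M' '9' '5' '7' '2' '1'
 *
 * yields the part number "BCM95721".
 */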
11402 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11404 u32 val;
11406 if (tg3_nvram_read_swab(tp, offset, &val) ||
11407 (val & 0xfc000000) != 0x0c000000 ||
11408 tg3_nvram_read_swab(tp, offset + 4, &val) ||
11409 val != 0)
11410 return 0;
11412 return 1;
11415 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
11417 u32 offset, major, minor, build;
11419 tp->fw_ver[0] = 's';
11420 tp->fw_ver[1] = 'b';
11421 tp->fw_ver[2] = '\0';
11423 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
11424 return;
11426 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
11427 case TG3_EEPROM_SB_REVISION_0:
11428 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
11429 break;
11430 case TG3_EEPROM_SB_REVISION_2:
11431 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
11432 break;
11433 case TG3_EEPROM_SB_REVISION_3:
11434 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
11435 break;
11436 default:
11437 return;
11440 if (tg3_nvram_read_swab(tp, offset, &val))
11441 return;
11443 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
11444 TG3_EEPROM_SB_EDH_BLD_SHFT;
11445 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
11446 TG3_EEPROM_SB_EDH_MAJ_SHFT;
11447 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
11449 if (minor > 99 || build > 26)
11450 return;
11452 snprintf(&tp->fw_ver[2], 30, " v%d.%02d", major, minor);
11454 if (build > 0) {
11455 tp->fw_ver[8] = 'a' + build - 1;
11456 tp->fw_ver[9] = '\0';
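/* Worked example of the decode above (illustrative): a selfboot image
 * whose EDH word yields major = 1, minor = 2 and build = 3 ends up with
 * fw_ver = "sb v1.02c" -- the "sb" prefix written first, the " v%d.%02d"
 * body appended, and the build encoded as a trailing letter
 * ('a' + build - 1).
 */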
11460 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
11462 u32 val, offset, start;
11463 u32 ver_offset;
11464 int i, bcnt;
11466 if (tg3_nvram_read_swab(tp, 0, &val))
11467 return;
11469 if (val != TG3_EEPROM_MAGIC) {
11470 if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
11471 tg3_read_sb_ver(tp, val);
11473 return;
11476 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
11477 tg3_nvram_read_swab(tp, 0x4, &start))
11478 return;
11480 offset = tg3_nvram_logical_addr(tp, offset);
11482 if (!tg3_fw_img_is_valid(tp, offset) ||
11483 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
11484 return;
11486 offset = offset + ver_offset - start;
11487 for (i = 0; i < 16; i += 4) {
11488 __le32 v;
11489 if (tg3_nvram_read_le(tp, offset + i, &v))
11490 return;
11492 memcpy(tp->fw_ver + i, &v, 4);
11495 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11496 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
11497 return;
11499 for (offset = TG3_NVM_DIR_START;
11500 offset < TG3_NVM_DIR_END;
11501 offset += TG3_NVM_DIRENT_SIZE) {
11502 if (tg3_nvram_read_swab(tp, offset, &val))
11503 return;
11505 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
11506 break;
11509 if (offset == TG3_NVM_DIR_END)
11510 return;
11512 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11513 start = 0x08000000;
11514 else if (tg3_nvram_read_swab(tp, offset - 4, &start))
11515 return;
11517 if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
11518 !tg3_fw_img_is_valid(tp, offset) ||
11519 tg3_nvram_read_swab(tp, offset + 8, &val))
11520 return;
11522 offset += val - start;
11524 bcnt = strlen(tp->fw_ver);
11526 tp->fw_ver[bcnt++] = ',';
11527 tp->fw_ver[bcnt++] = ' ';
11529 for (i = 0; i < 4; i++) {
11530 __le32 v;
11531 if (tg3_nvram_read_le(tp, offset, &v))
11532 return;
11534 offset += sizeof(v);
11536 if (bcnt > TG3_VER_SIZE - sizeof(v)) {
11537 memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
11538 break;
11541 memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
11542 bcnt += sizeof(v);
11545 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
11548 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11550 static int __devinit tg3_get_invariants(struct tg3 *tp)
11552 static struct pci_device_id write_reorder_chipsets[] = {
11553 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11554 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11555 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11556 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11557 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11558 PCI_DEVICE_ID_VIA_8385_0) },
11559 { },
11561 u32 misc_ctrl_reg;
11562 u32 pci_state_reg, grc_misc_cfg;
11563 u32 val;
11564 u16 pci_cmd;
11565 int err;
11567 /* Force memory write invalidate off. If we leave it on,
11568 * then on 5700_BX chips we have to enable a workaround.
11569 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11570 * to match the cacheline size. The Broadcom driver has this
11571 * workaround but turns MWI off all the time, so it never uses
11572 * it. This seems to suggest that the workaround is insufficient.
11574 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11575 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11576 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11578 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11579 * has the register indirect write enable bit set before
11580 * we try to access any of the MMIO registers. It is also
11581 * critical that the PCI-X hw workaround situation is decided
11582 * before that as well.
11584 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11585 &misc_ctrl_reg);
11587 tp->pci_chip_rev_id = (misc_ctrl_reg >>
11588 MISC_HOST_CTRL_CHIPREV_SHIFT);
11589 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11590 u32 prod_id_asic_rev;
11592 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11593 &prod_id_asic_rev);
11594 tp->pci_chip_rev_id = prod_id_asic_rev;
11597 /* Wrong chip ID in 5752 A0. This code can be removed later
11598 * as A0 is not in production.
11600 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11601 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11603 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11604 * we need to disable memory and use config. cycles
11605 * only to access all registers. The 5702/03 chips
11606 * can mistakenly decode the special cycles from the
11607 * ICH chipsets as memory write cycles, causing corruption
11608 * of register and memory space. Only certain ICH bridges
11609 * will drive special cycles with non-zero data during the
11610 * address phase which can fall within the 5703's address
11611 * range. This is not an ICH bug as the PCI spec allows
11612 * non-zero address during special cycles. However, only
11613 * these ICH bridges are known to drive non-zero addresses
11614 * during special cycles.
11616 * Since special cycles do not cross PCI bridges, we only
11617 * enable this workaround if the 5703 is on the secondary
11618 * bus of these ICH bridges.
11620 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11621 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11622 static struct tg3_dev_id {
11623 u32 vendor;
11624 u32 device;
11625 u32 rev;
11626 } ich_chipsets[] = {
11627 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11628 PCI_ANY_ID },
11629 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11630 PCI_ANY_ID },
11631 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11632 0xa },
11633 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11634 PCI_ANY_ID },
11635 { },
11637 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11638 struct pci_dev *bridge = NULL;
11640 while (pci_id->vendor != 0) {
11641 bridge = pci_get_device(pci_id->vendor, pci_id->device,
11642 bridge);
11643 if (!bridge) {
11644 pci_id++;
11645 continue;
11647 if (pci_id->rev != PCI_ANY_ID) {
11648 if (bridge->revision > pci_id->rev)
11649 continue;
11651 if (bridge->subordinate &&
11652 (bridge->subordinate->number ==
11653 tp->pdev->bus->number)) {
11655 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11656 pci_dev_put(bridge);
11657 break;
11662 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
11663 static struct tg3_dev_id {
11664 u32 vendor;
11665 u32 device;
11666 } bridge_chipsets[] = {
11667 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
11668 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
11669 { },
11671 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
11672 struct pci_dev *bridge = NULL;
11674 while (pci_id->vendor != 0) {
11675 bridge = pci_get_device(pci_id->vendor,
11676 pci_id->device,
11677 bridge);
11678 if (!bridge) {
11679 pci_id++;
11680 continue;
11682 if (bridge->subordinate &&
11683 (bridge->subordinate->number <=
11684 tp->pdev->bus->number) &&
11685 (bridge->subordinate->subordinate >=
11686 tp->pdev->bus->number)) {
11687 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
11688 pci_dev_put(bridge);
11689 break;
11694 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11695 * DMA addresses > 40-bit. This bridge may have additional
11696 * 57xx devices behind it in some 4-port NIC designs, for example.
11697 * Any tg3 device found behind the bridge will also need the 40-bit
11698 * DMA workaround.
11700 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11701 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11702 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
11703 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11704 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
11706 else {
11707 struct pci_dev *bridge = NULL;
11709 do {
11710 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
11711 PCI_DEVICE_ID_SERVERWORKS_EPB,
11712 bridge);
11713 if (bridge && bridge->subordinate &&
11714 (bridge->subordinate->number <=
11715 tp->pdev->bus->number) &&
11716 (bridge->subordinate->subordinate >=
11717 tp->pdev->bus->number)) {
11718 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11719 pci_dev_put(bridge);
11720 break;
11722 } while (bridge);
11725 /* Initialize misc host control in PCI block. */
11726 tp->misc_host_ctrl |= (misc_ctrl_reg &
11727 MISC_HOST_CTRL_CHIPREV);
11728 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11729 tp->misc_host_ctrl);
11731 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11732 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11733 tp->pdev_peer = tg3_find_peer(tp);
11735 /* Intentionally exclude ASIC_REV_5906 */
11736 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11737 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11738 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11739 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11740 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
11741 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
11742 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
11744 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11745 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11746 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11747 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
11748 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11749 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
11751 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
11752 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11753 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
11755 /* 5700 B0 chips do not support checksumming correctly due
11756 * to hardware bugs.
11758 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
11759 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
11760 else {
11761 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11762 tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
11763 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
11764 tp->dev->features |= NETIF_F_IPV6_CSUM;
11767 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
11768 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
11769 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
11770 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
11771 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
11772 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
11773 tp->pdev_peer == tp->pdev))
11774 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
11776 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
11777 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11778 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
11779 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
11780 } else {
11781 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
11782 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11783 ASIC_REV_5750 &&
11784 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
11785 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
11789 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11790 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11791 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11793 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11794 &pci_state_reg);
11796 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
11797 if (tp->pcie_cap != 0) {
11798 u16 lnkctl;
11800 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
11802 pcie_set_readrq(tp->pdev, 4096);
11804 pci_read_config_word(tp->pdev,
11805 tp->pcie_cap + PCI_EXP_LNKCTL,
11806 &lnkctl);
11807 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
11808 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11809 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
11810 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11811 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11812 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
11813 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
11815 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
11816 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
11817 } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11818 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11819 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
11820 if (!tp->pcix_cap) {
11821 printk(KERN_ERR PFX "Cannot find PCI-X "
11822 "capability, aborting.\n");
11823 return -EIO;
11826 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
11827 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
11830 /* If we have an AMD 762 or VIA K8T800 chipset, write
11831 * reordering to the mailbox registers done by the host
11832 * controller can cause major troubles. We read back from
11833 * every mailbox register write to force the writes to be
11834 * posted to the chip in order.
11836 if (pci_dev_present(write_reorder_chipsets) &&
11837 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11838 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
11840 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
11841 &tp->pci_cacheline_sz);
11842 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
11843 &tp->pci_lat_timer);
11844 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11845 tp->pci_lat_timer < 64) {
11846 tp->pci_lat_timer = 64;
11847 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
11848 tp->pci_lat_timer);
11851 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
11852 /* 5700 BX chips need to have their TX producer index
11853 * mailboxes written twice to work around a bug.
11855 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
11857 /* If we are in PCI-X mode, enable register write workaround.
11859 * The workaround is to use indirect register accesses
11860 * for all chip writes not to mailbox registers.
11862 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11863 u32 pm_reg;
11865 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11867 /* The chip can have its power management PCI config
11868 * space registers clobbered due to this bug.
11869 * So explicitly force the chip into D0 here.
11871 pci_read_config_dword(tp->pdev,
11872 tp->pm_cap + PCI_PM_CTRL,
11873 &pm_reg);
11874 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
11875 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
11876 pci_write_config_dword(tp->pdev,
11877 tp->pm_cap + PCI_PM_CTRL,
11878 pm_reg);
11880 /* Also, force SERR#/PERR# in PCI command. */
11881 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11882 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
11883 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11887 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
11888 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
11889 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
11890 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
11892 /* Chip-specific fixup from Broadcom driver */
11893 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
11894 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
11895 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
11896 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
11899 /* Default fast path register access methods */
11900 tp->read32 = tg3_read32;
11901 tp->write32 = tg3_write32;
11902 tp->read32_mbox = tg3_read32;
11903 tp->write32_mbox = tg3_write32;
11904 tp->write32_tx_mbox = tg3_write32;
11905 tp->write32_rx_mbox = tg3_write32;
11907 /* Various workaround register access methods */
11908 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
11909 tp->write32 = tg3_write_indirect_reg32;
11910 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11911 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
11912 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
11914 * Back-to-back register writes can cause problems on these
11915 * chips; the workaround is to read back all reg writes
11916 * except those to mailbox regs.
11918 * See tg3_write_indirect_reg32().
11920 tp->write32 = tg3_write_flush_reg32;
11924 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
11925 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
11926 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11927 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
11928 tp->write32_rx_mbox = tg3_write_flush_reg32;
11931 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
11932 tp->read32 = tg3_read_indirect_reg32;
11933 tp->write32 = tg3_write_indirect_reg32;
11934 tp->read32_mbox = tg3_read_indirect_mbox;
11935 tp->write32_mbox = tg3_write_indirect_mbox;
11936 tp->write32_tx_mbox = tg3_write_indirect_mbox;
11937 tp->write32_rx_mbox = tg3_write_indirect_mbox;
11939 iounmap(tp->regs);
11940 tp->regs = NULL;
11942 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11943 pci_cmd &= ~PCI_COMMAND_MEMORY;
11944 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11946 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11947 tp->read32_mbox = tg3_read32_mbox_5906;
11948 tp->write32_mbox = tg3_write32_mbox_5906;
11949 tp->write32_tx_mbox = tg3_write32_mbox_5906;
11950 tp->write32_rx_mbox = tg3_write32_mbox_5906;
11953 if (tp->write32 == tg3_write_indirect_reg32 ||
11954 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11955 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11956 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
11957 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
11959 /* Get eeprom hw config before calling tg3_set_power_state().
11960 * In particular, the TG3_FLG2_IS_NIC flag must be
11961 * determined before calling tg3_set_power_state() so that
11962 * we know whether or not to switch out of Vaux power.
11963 * When the flag is set, it means that GPIO1 is used for eeprom
11964 * write protect and also implies that it is a LOM where GPIOs
11965 * are not used to switch power.
11967 tg3_get_eeprom_hw_cfg(tp);
11969 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
11970 /* Allow reads and writes to the
11971 * APE register and memory space.
11973 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
11974 PCISTATE_ALLOW_APE_SHMEM_WR;
11975 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
11976 pci_state_reg);
11979 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11980 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11981 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
11982 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
11983 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11985 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
11986 * GPIO1 driven high will bring 5700's external PHY out of reset.
11987 * It is also used as eeprom write protect on LOMs.
11989 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
11990 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11991 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
11992 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
11993 GRC_LCLCTRL_GPIO_OUTPUT1);
11994 /* Unused GPIO3 must be driven as output on 5752 because there
11995 * are no pull-up resistors on unused GPIO pins.
11996 */
11997 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11998 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
12000 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12001 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12002 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12004 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
12005 /* Turn off the debug UART. */
12006 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12007 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12008 /* Keep VMain power. */
12009 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12010 GRC_LCLCTRL_GPIO_OUTPUT0;
12011 }
12013 /* Force the chip into D0. */
12014 err = tg3_set_power_state(tp, PCI_D0);
12015 if (err) {
12016 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12017 pci_name(tp->pdev));
12018 return err;
12019 }
12021 /* Derive initial jumbo mode from MTU assigned in
12022 * ether_setup() via the alloc_etherdev() call
12023 */
12024 if (tp->dev->mtu > ETH_DATA_LEN &&
12025 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12026 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
12028 /* Determine WakeOnLan speed to use. */
12029 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12030 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12031 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12032 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12033 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12034 } else {
12035 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12036 }
12038 /* A few boards don't want Ethernet@WireSpeed phy feature */
12039 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12040 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12041 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
12042 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
12043 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
12044 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
12045 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12047 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12048 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12049 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12050 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12051 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12053 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
12054 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12055 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12056 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780) {
12057 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12058 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12059 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12060 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
12061 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12062 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12063 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
12064 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12065 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
12066 } else
12067 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12068 }
12070 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12071 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12072 tp->phy_otp = tg3_read_otp_phycfg(tp);
12073 if (tp->phy_otp == 0)
12074 tp->phy_otp = TG3_OTP_DEFAULT;
12075 }
12077 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
12078 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12079 else
12080 tp->mi_mode = MAC_MI_MODE_BASE;
12082 tp->coalesce_mode = 0;
12083 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12084 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12085 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
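/* 5785 and 57780 drive an external PHY through the kernel phylib/MDIO
 * framework (set up in tg3_mdio_init() below) instead of the driver's
 * internal PHY handling.
 */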
12087 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12088 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12089 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12091 err = tg3_mdio_init(tp);
12092 if (err)
12093 return err;
12095 /* Initialize data/descriptor byte/word swapping. */
12096 val = tr32(GRC_MODE);
12097 val &= GRC_MODE_HOST_STACKUP;
12098 tw32(GRC_MODE, val | tp->grc_mode);
12100 tg3_switch_clocks(tp);
12102 /* Clear this out for sanity. */
12103 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12105 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12106 &pci_state_reg);
12107 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12108 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12109 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12111 if (chiprevid == CHIPREV_ID_5701_A0 ||
12112 chiprevid == CHIPREV_ID_5701_B0 ||
12113 chiprevid == CHIPREV_ID_5701_B2 ||
12114 chiprevid == CHIPREV_ID_5701_B5) {
12115 void __iomem *sram_base;
12117 /* Write some dummy words into the SRAM status block
12118 * area, see if it reads back correctly. If the return
12119 * value is bad, force enable the PCIX workaround.
12120 */
12121 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12123 writel(0x00000000, sram_base);
12124 writel(0x00000000, sram_base + 4);
12125 writel(0xffffffff, sram_base + 4);
12126 if (readl(sram_base) != 0x00000000)
12127 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12128 }
12129 }
12131 udelay(50);
12132 tg3_nvram_init(tp);
12134 grc_misc_cfg = tr32(GRC_MISC_CFG);
12135 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12137 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12138 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12139 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12140 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12142 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12143 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12144 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12145 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12146 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12147 HOSTCC_MODE_CLRTICK_TXBD);
12149 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12150 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12151 tp->misc_host_ctrl);
12152 }
12154 /* Preserve the APE MAC_MODE bits */
12155 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12156 tp->mac_mode = tr32(MAC_MODE) |
12157 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12158 else
12159 tp->mac_mode = TG3_DEF_MAC_MODE;
12161 /* these are limited to 10/100 only */
12162 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12163 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12164 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12165 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12166 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12167 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12168 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12169 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12170 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
12171 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12172 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
12173 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
12174 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12175 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12177 err = tg3_phy_probe(tp);
12178 if (err) {
12179 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12180 pci_name(tp->pdev), err);
12181 /* ... but do not return immediately ... */
12182 tg3_mdio_fini(tp);
12183 }
12185 tg3_read_partno(tp);
12186 tg3_read_fw_ver(tp);
12188 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12189 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12190 } else {
12191 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12192 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12193 else
12194 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12195 }
12197 /* 5700 {AX,BX} chips have a broken status block link
12198 * change bit implementation, so we must use the
12199 * status register in those cases.
12200 */
12201 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12202 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12203 else
12204 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12206 /* The led_ctrl is set during tg3_phy_probe, here we might
12207 * have to force the link status polling mechanism based
12208 * upon subsystem IDs.
12209 */
12210 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
12211 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12212 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12213 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12214 TG3_FLAG_USE_LINKCHG_REG);
12215 }
12217 /* For all SERDES we poll the MAC status register. */
12218 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12219 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12220 else
12221 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12223 tp->rx_offset = NET_IP_ALIGN;
12224 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12225 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12226 tp->rx_offset = 0;
12228 tp->rx_std_max_post = TG3_RX_RING_SIZE;
12230 /* Increment the rx prod index on the rx std ring by at most
12231 * 8 for these chips to workaround hw errata.
12232 */
12233 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12234 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12235 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12236 tp->rx_std_max_post = 8;
12238 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12239 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12240 PCIE_PWR_MGMT_L1_THRESH_MSK;
12242 return err;
12243 }
12245 #ifdef CONFIG_SPARC
12246 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12247 {
12248 struct net_device *dev = tp->dev;
12249 struct pci_dev *pdev = tp->pdev;
12250 struct device_node *dp = pci_device_to_OF_node(pdev);
12251 const unsigned char *addr;
12252 int len;
12254 addr = of_get_property(dp, "local-mac-address", &len);
12255 if (addr && len == 6) {
12256 memcpy(dev->dev_addr, addr, 6);
12257 memcpy(dev->perm_addr, dev->dev_addr, 6);
12258 return 0;
12259 }
12260 return -ENODEV;
12261 }
12263 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12264 {
12265 struct net_device *dev = tp->dev;
12267 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
12268 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
12269 return 0;
12270 }
12271 #endif
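/* MAC address discovery order used below: OpenFirmware property (sparc
 * only), then the NIC SRAM mailbox left by the boot firmware, then
 * NVRAM, and finally whatever the MAC_ADDR registers already hold.
 */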
12273 static int __devinit tg3_get_device_address(struct tg3 *tp)
12274 {
12275 struct net_device *dev = tp->dev;
12276 u32 hi, lo, mac_offset;
12277 int addr_ok = 0;
12279 #ifdef CONFIG_SPARC
12280 if (!tg3_get_macaddr_sparc(tp))
12281 return 0;
12282 #endif
12284 mac_offset = 0x7c;
12285 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12286 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12287 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
12288 mac_offset = 0xcc;
12289 if (tg3_nvram_lock(tp))
12290 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
12291 else
12292 tg3_nvram_unlock(tp);
12293 }
12294 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12295 mac_offset = 0x10;
12297 /* First try to get it from MAC address mailbox. */
12298 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
12299 if ((hi >> 16) == 0x484b) {
12300 dev->dev_addr[0] = (hi >> 8) & 0xff;
12301 dev->dev_addr[1] = (hi >> 0) & 0xff;
12303 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
12304 dev->dev_addr[2] = (lo >> 24) & 0xff;
12305 dev->dev_addr[3] = (lo >> 16) & 0xff;
12306 dev->dev_addr[4] = (lo >> 8) & 0xff;
12307 dev->dev_addr[5] = (lo >> 0) & 0xff;
12309 /* Some old bootcode may report a 0 MAC address in SRAM */
12310 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
12311 }
12312 if (!addr_ok) {
12313 /* Next, try NVRAM. */
12314 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
12315 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
12316 dev->dev_addr[0] = ((hi >> 16) & 0xff);
12317 dev->dev_addr[1] = ((hi >> 24) & 0xff);
12318 dev->dev_addr[2] = ((lo >> 0) & 0xff);
12319 dev->dev_addr[3] = ((lo >> 8) & 0xff);
12320 dev->dev_addr[4] = ((lo >> 16) & 0xff);
12321 dev->dev_addr[5] = ((lo >> 24) & 0xff);
12322 }
12323 /* Finally just fetch it out of the MAC control regs. */
12324 else {
12325 hi = tr32(MAC_ADDR_0_HIGH);
12326 lo = tr32(MAC_ADDR_0_LOW);
12328 dev->dev_addr[5] = lo & 0xff;
12329 dev->dev_addr[4] = (lo >> 8) & 0xff;
12330 dev->dev_addr[3] = (lo >> 16) & 0xff;
12331 dev->dev_addr[2] = (lo >> 24) & 0xff;
12332 dev->dev_addr[1] = hi & 0xff;
12333 dev->dev_addr[0] = (hi >> 8) & 0xff;
12334 }
12335 }
12337 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
12338 #ifdef CONFIG_SPARC
12339 if (!tg3_get_default_macaddr_sparc(tp))
12340 return 0;
12341 #endif
12342 return -EINVAL;
12343 }
12344 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
12345 return 0;
12346 }
12348 #define BOUNDARY_SINGLE_CACHELINE 1
12349 #define BOUNDARY_MULTI_CACHELINE 2
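/* Choose DMA read/write boundary bits for DMA_RW_CTRL from the host
 * cache line size and the per-architecture preference selected below:
 * hosts that disconnect on cache line crossings get a single-cacheline
 * boundary, others a larger multi-cacheline one, and architectures with
 * no stated preference leave the value untouched.
 */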
12351 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12352 {
12353 int cacheline_size;
12354 u8 byte;
12355 int goal;
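/* PCI_CACHE_LINE_SIZE is programmed in units of 32-bit words, hence the
 * multiply by four below; a value of zero means the size was never set,
 * so the largest boundary (1024 bytes) is assumed.
 */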
12357 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
12358 if (byte == 0)
12359 cacheline_size = 1024;
12360 else
12361 cacheline_size = (int) byte * 4;
12363 /* On 5703 and later chips, the boundary bits have no
12364 * effect.
12365 */
12366 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12367 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12368 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12369 goto out;
12371 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12372 goal = BOUNDARY_MULTI_CACHELINE;
12373 #else
12374 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12375 goal = BOUNDARY_SINGLE_CACHELINE;
12376 #else
12377 goal = 0;
12378 #endif
12379 #endif
12381 if (!goal)
12382 goto out;
12384 /* PCI controllers on most RISC systems tend to disconnect
12385 * when a device tries to burst across a cache-line boundary.
12386 * Therefore, letting tg3 do so just wastes PCI bandwidth.
12388 * Unfortunately, for PCI-E there are only limited
12389 * write-side controls for this, and thus for reads
12390 * we will still get the disconnects. We'll also waste
12391 * these PCI cycles for both read and write for chips
12392 * other than 5700 and 5701 which do not implement the
12393 * boundary bits.
12394 */
12395 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12396 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12397 switch (cacheline_size) {
12398 case 16:
12399 case 32:
12400 case 64:
12401 case 128:
12402 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12403 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12404 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12405 } else {
12406 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12407 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12408 }
12409 break;
12411 case 256:
12412 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12413 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12414 break;
12416 default:
12417 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12418 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12419 break;
12420 }
12421 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12422 switch (cacheline_size) {
12423 case 16:
12424 case 32:
12425 case 64:
12426 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12427 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12428 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12429 break;
12430 }
12431 /* fallthrough */
12432 case 128:
12433 default:
12434 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12435 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12436 break;
12437 }
12438 } else {
12439 switch (cacheline_size) {
12440 case 16:
12441 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12442 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12443 DMA_RWCTRL_WRITE_BNDRY_16);
12444 break;
12445 }
12446 /* fallthrough */
12447 case 32:
12448 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12449 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12450 DMA_RWCTRL_WRITE_BNDRY_32);
12451 break;
12452 }
12453 /* fallthrough */
12454 case 64:
12455 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12456 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12457 DMA_RWCTRL_WRITE_BNDRY_64);
12458 break;
12459 }
12460 /* fallthrough */
12461 case 128:
12462 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12463 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12464 DMA_RWCTRL_WRITE_BNDRY_128);
12465 break;
12466 }
12467 /* fallthrough */
12468 case 256:
12469 val |= (DMA_RWCTRL_READ_BNDRY_256 |
12470 DMA_RWCTRL_WRITE_BNDRY_256);
12471 break;
12472 case 512:
12473 val |= (DMA_RWCTRL_READ_BNDRY_512 |
12474 DMA_RWCTRL_WRITE_BNDRY_512);
12475 break;
12476 case 1024:
12477 default:
12478 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12479 DMA_RWCTRL_WRITE_BNDRY_1024);
12480 break;
12481 }
12482 }
12484 out:
12485 return val;
12486 }
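/* tg3_do_test_dma() pushes one internal DMA descriptor through the
 * chip: the descriptor is written into NIC SRAM through the PCI memory
 * window, the read (host to card) or write (card to host) DMA queue is
 * kicked, and the completion FIFO is then polled for up to roughly
 * 4 ms.  Returns 0 on completion, -ENODEV on timeout.
 */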
12488 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
12489 {
12490 struct tg3_internal_buffer_desc test_desc;
12491 u32 sram_dma_descs;
12492 int i, ret;
12494 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
12496 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
12497 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
12498 tw32(RDMAC_STATUS, 0);
12499 tw32(WDMAC_STATUS, 0);
12501 tw32(BUFMGR_MODE, 0);
12502 tw32(FTQ_RESET, 0);
12504 test_desc.addr_hi = ((u64) buf_dma) >> 32;
12505 test_desc.addr_lo = buf_dma & 0xffffffff;
12506 test_desc.nic_mbuf = 0x00002100;
12507 test_desc.len = size;
12509 /*
12510 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
12511 * the *second* time the tg3 driver was getting loaded after an
12512 * initial scan.
12514 * Broadcom tells me:
12515 * ...the DMA engine is connected to the GRC block and a DMA
12516 * reset may affect the GRC block in some unpredictable way...
12517 * The behavior of resets to individual blocks has not been tested.
12519 * Broadcom noted the GRC reset will also reset all sub-components.
12520 */
12521 if (to_device) {
12522 test_desc.cqid_sqid = (13 << 8) | 2;
12524 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
12525 udelay(40);
12526 } else {
12527 test_desc.cqid_sqid = (16 << 8) | 7;
12529 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
12530 udelay(40);
12531 }
12532 test_desc.flags = 0x00000005;
12534 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
12535 u32 val;
12537 val = *(((u32 *)&test_desc) + i);
12538 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
12539 sram_dma_descs + (i * sizeof(u32)));
12540 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
12541 }
12542 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
12544 if (to_device) {
12545 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
12546 } else {
12547 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
12548 }
12550 ret = -ENODEV;
12551 for (i = 0; i < 40; i++) {
12552 u32 val;
12554 if (to_device)
12555 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
12556 else
12557 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
12558 if ((val & 0xffff) == sram_dma_descs) {
12559 ret = 0;
12560 break;
12561 }
12563 udelay(100);
12564 }
12566 return ret;
12567 }
12569 #define TEST_BUFFER_SIZE 0x2000
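/* tg3_test_dma() computes an initial DMA_RW_CTRL value and, on
 * 5700/5701 only, runs a loopback test: fill a coherent buffer with a
 * known pattern, DMA it to card memory, DMA it back, and compare.  If
 * corruption is seen the write boundary is tightened to 16 bytes and
 * the test retried; a few host bridges known to need the 16-byte
 * boundary get it even when the test passes.
 */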
12571 static int __devinit tg3_test_dma(struct tg3 *tp)
12572 {
12573 dma_addr_t buf_dma;
12574 u32 *buf, saved_dma_rwctrl;
12575 int ret;
12577 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12578 if (!buf) {
12579 ret = -ENOMEM;
12580 goto out_nofree;
12581 }
12583 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12584 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12586 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
12588 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12589 /* DMA read watermark not used on PCIE */
12590 tp->dma_rwctrl |= 0x00180000;
12591 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
12592 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12593 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
12594 tp->dma_rwctrl |= 0x003f0000;
12595 else
12596 tp->dma_rwctrl |= 0x003f000f;
12597 } else {
12598 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12599 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12600 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
12601 u32 read_water = 0x7;
12603 /* If the 5704 is behind the EPB bridge, we can
12604 * do the less restrictive ONE_DMA workaround for
12605 * better performance.
12606 */
12607 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
12608 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12609 tp->dma_rwctrl |= 0x8000;
12610 else if (ccval == 0x6 || ccval == 0x7)
12611 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
12613 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
12614 read_water = 4;
12615 /* Set bit 23 to enable PCIX hw bug fix */
12616 tp->dma_rwctrl |=
12617 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
12618 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
12619 (1 << 23);
12620 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
12621 /* 5780 always in PCIX mode */
12622 tp->dma_rwctrl |= 0x00144000;
12623 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12624 /* 5714 always in PCIX mode */
12625 tp->dma_rwctrl |= 0x00148000;
12626 } else {
12627 tp->dma_rwctrl |= 0x001b000f;
12628 }
12629 }
12631 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12632 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12633 tp->dma_rwctrl &= 0xfffffff0;
12635 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12636 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12637 /* Remove this if it causes problems for some boards. */
12638 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
12640 /* On 5700/5701 chips, we need to set this bit.
12641 * Otherwise the chip will issue cacheline transactions
12642 * to streamable DMA memory with not all the byte
12643 * enables turned on. This is an error on several
12644 * RISC PCI controllers, in particular sparc64.
12646 * On 5703/5704 chips, this bit has been reassigned
12647 * a different meaning. In particular, it is used
12648 * on those chips to enable a PCI-X workaround.
12649 */
12650 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
12651 }
12653 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12655 #if 0
12656 /* Unneeded, already done by tg3_get_invariants. */
12657 tg3_switch_clocks(tp);
12658 #endif
12660 ret = 0;
12661 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12662 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
12663 goto out;
12665 /* It is best to perform DMA test with maximum write burst size
12666 * to expose the 5700/5701 write DMA bug.
12667 */
12668 saved_dma_rwctrl = tp->dma_rwctrl;
12669 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12670 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12672 while (1) {
12673 u32 *p = buf, i;
12675 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
12676 p[i] = i;
12678 /* Send the buffer to the chip. */
12679 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
12680 if (ret) {
12681 printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
12682 break;
12683 }
12685 #if 0
12686 /* validate data reached card RAM correctly. */
12687 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12688 u32 val;
12689 tg3_read_mem(tp, 0x2100 + (i*4), &val);
12690 if (le32_to_cpu(val) != p[i]) {
12691 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
12692 /* ret = -ENODEV here? */
12693 }
12694 p[i] = 0;
12695 }
12696 #endif
12697 /* Now read it back. */
12698 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
12699 if (ret) {
12700 printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
12702 break;
12703 }
12705 /* Verify it. */
12706 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12707 if (p[i] == i)
12708 continue;
12710 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12711 DMA_RWCTRL_WRITE_BNDRY_16) {
12712 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12713 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12714 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12715 break;
12716 } else {
12717 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
12718 ret = -ENODEV;
12719 goto out;
12720 }
12721 }
12723 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
12724 /* Success. */
12725 ret = 0;
12726 break;
12727 }
12728 }
12729 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12730 DMA_RWCTRL_WRITE_BNDRY_16) {
12731 static struct pci_device_id dma_wait_state_chipsets[] = {
12732 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
12733 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
12734 { },
12735 };
12737 /* DMA test passed without adjusting DMA boundary,
12738 * now look for chipsets that are known to expose the
12739 * DMA bug without failing the test.
12740 */
12741 if (pci_dev_present(dma_wait_state_chipsets)) {
12742 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12743 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12744 }
12745 else
12746 /* Safe to use the calculated DMA boundary. */
12747 tp->dma_rwctrl = saved_dma_rwctrl;
12749 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12750 }
12752 out:
12753 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
12754 out_nofree:
12755 return ret;
12756 }
12758 static void __devinit tg3_init_link_config(struct tg3 *tp)
12759 {
12760 tp->link_config.advertising =
12761 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12762 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12763 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
12764 ADVERTISED_Autoneg | ADVERTISED_MII);
12765 tp->link_config.speed = SPEED_INVALID;
12766 tp->link_config.duplex = DUPLEX_INVALID;
12767 tp->link_config.autoneg = AUTONEG_ENABLE;
12768 tp->link_config.active_speed = SPEED_INVALID;
12769 tp->link_config.active_duplex = DUPLEX_INVALID;
12770 tp->link_config.phy_is_low_power = 0;
12771 tp->link_config.orig_speed = SPEED_INVALID;
12772 tp->link_config.orig_duplex = DUPLEX_INVALID;
12773 tp->link_config.orig_autoneg = AUTONEG_INVALID;
12774 }
12776 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
12777 {
12778 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12779 tp->bufmgr_config.mbuf_read_dma_low_water =
12780 DEFAULT_MB_RDMA_LOW_WATER_5705;
12781 tp->bufmgr_config.mbuf_mac_rx_low_water =
12782 DEFAULT_MB_MACRX_LOW_WATER_5705;
12783 tp->bufmgr_config.mbuf_high_water =
12784 DEFAULT_MB_HIGH_WATER_5705;
12785 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12786 tp->bufmgr_config.mbuf_mac_rx_low_water =
12787 DEFAULT_MB_MACRX_LOW_WATER_5906;
12788 tp->bufmgr_config.mbuf_high_water =
12789 DEFAULT_MB_HIGH_WATER_5906;
12790 }
12792 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12793 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
12794 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12795 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
12796 tp->bufmgr_config.mbuf_high_water_jumbo =
12797 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
12798 } else {
12799 tp->bufmgr_config.mbuf_read_dma_low_water =
12800 DEFAULT_MB_RDMA_LOW_WATER;
12801 tp->bufmgr_config.mbuf_mac_rx_low_water =
12802 DEFAULT_MB_MACRX_LOW_WATER;
12803 tp->bufmgr_config.mbuf_high_water =
12804 DEFAULT_MB_HIGH_WATER;
12806 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12807 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
12808 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12809 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
12810 tp->bufmgr_config.mbuf_high_water_jumbo =
12811 DEFAULT_MB_HIGH_WATER_JUMBO;
12812 }
12814 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
12815 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
12816 }
12818 static char * __devinit tg3_phy_string(struct tg3 *tp)
12819 {
12820 switch (tp->phy_id & PHY_ID_MASK) {
12821 case PHY_ID_BCM5400: return "5400";
12822 case PHY_ID_BCM5401: return "5401";
12823 case PHY_ID_BCM5411: return "5411";
12824 case PHY_ID_BCM5701: return "5701";
12825 case PHY_ID_BCM5703: return "5703";
12826 case PHY_ID_BCM5704: return "5704";
12827 case PHY_ID_BCM5705: return "5705";
12828 case PHY_ID_BCM5750: return "5750";
12829 case PHY_ID_BCM5752: return "5752";
12830 case PHY_ID_BCM5714: return "5714";
12831 case PHY_ID_BCM5780: return "5780";
12832 case PHY_ID_BCM5755: return "5755";
12833 case PHY_ID_BCM5787: return "5787";
12834 case PHY_ID_BCM5784: return "5784";
12835 case PHY_ID_BCM5756: return "5722/5756";
12836 case PHY_ID_BCM5906: return "5906";
12837 case PHY_ID_BCM5761: return "5761";
12838 case PHY_ID_BCM8002: return "8002/serdes";
12839 case 0: return "serdes";
12840 default: return "unknown";
12841 }
12842 }
12844 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
12845 {
12846 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12847 strcpy(str, "PCI Express");
12848 return str;
12849 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12850 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
12852 strcpy(str, "PCIX:");
12854 if ((clock_ctrl == 7) ||
12855 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
12856 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
12857 strcat(str, "133MHz");
12858 else if (clock_ctrl == 0)
12859 strcat(str, "33MHz");
12860 else if (clock_ctrl == 2)
12861 strcat(str, "50MHz");
12862 else if (clock_ctrl == 4)
12863 strcat(str, "66MHz");
12864 else if (clock_ctrl == 6)
12865 strcat(str, "100MHz");
12866 } else {
12867 strcpy(str, "PCI:");
12868 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
12869 strcat(str, "66MHz");
12870 else
12871 strcat(str, "33MHz");
12872 }
12873 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
12874 strcat(str, ":32-bit");
12875 else
12876 strcat(str, ":64-bit");
12877 return str;
12878 }
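/* On dual-port boards the two ports are separate PCI functions in the
 * same slot; tg3_find_peer() scans the other functions for the second
 * tg3 device, falling back to tp->pdev when the board is configured
 * single-port.  No reference is held on the peer (see the comment at
 * the end of the function).
 */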
12880 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
12881 {
12882 struct pci_dev *peer;
12883 unsigned int func, devnr = tp->pdev->devfn & ~7;
12885 for (func = 0; func < 8; func++) {
12886 peer = pci_get_slot(tp->pdev->bus, devnr | func);
12887 if (peer && peer != tp->pdev)
12888 break;
12889 pci_dev_put(peer);
12890 }
12891 /* 5704 can be configured in single-port mode, set peer to
12892 * tp->pdev in that case.
12893 */
12894 if (!peer) {
12895 peer = tp->pdev;
12896 return peer;
12897 }
12899 /*
12900 * We don't need to keep the refcount elevated; there's no way
12901 * to remove one half of this device without removing the other
12902 */
12903 pci_dev_put(peer);
12905 return peer;
12906 }
12908 static void __devinit tg3_init_coal(struct tg3 *tp)
12909 {
12910 struct ethtool_coalesce *ec = &tp->coal;
12912 memset(ec, 0, sizeof(*ec));
12913 ec->cmd = ETHTOOL_GCOALESCE;
12914 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
12915 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
12916 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
12917 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
12918 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
12919 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
12920 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
12921 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
12922 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
12924 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
12925 HOSTCC_MODE_CLRTICK_TXBD)) {
12926 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
12927 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
12928 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
12929 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
12930 }
12932 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12933 ec->rx_coalesce_usecs_irq = 0;
12934 ec->tx_coalesce_usecs_irq = 0;
12935 ec->stats_block_coalesce_usecs = 0;
12936 }
12937 }
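/* Two net_device_ops tables are provided; they differ only in the
 * transmit handler.  tg3_start_xmit_dma_bug (defined earlier in this
 * file) is used for the older chips whose DMA engines need software
 * workarounds in the transmit path, as selected in tg3_init_one().
 */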
12939 static const struct net_device_ops tg3_netdev_ops = {
12940 .ndo_open = tg3_open,
12941 .ndo_stop = tg3_close,
12942 .ndo_start_xmit = tg3_start_xmit,
12943 .ndo_get_stats = tg3_get_stats,
12944 .ndo_validate_addr = eth_validate_addr,
12945 .ndo_set_multicast_list = tg3_set_rx_mode,
12946 .ndo_set_mac_address = tg3_set_mac_addr,
12947 .ndo_do_ioctl = tg3_ioctl,
12948 .ndo_tx_timeout = tg3_tx_timeout,
12949 .ndo_change_mtu = tg3_change_mtu,
12950 #if TG3_VLAN_TAG_USED
12951 .ndo_vlan_rx_register = tg3_vlan_rx_register,
12952 #endif
12953 #ifdef CONFIG_NET_POLL_CONTROLLER
12954 .ndo_poll_controller = tg3_poll_controller,
12955 #endif
12956 };
12958 static const struct net_device_ops tg3_netdev_ops_dma_bug = {
12959 .ndo_open = tg3_open,
12960 .ndo_stop = tg3_close,
12961 .ndo_start_xmit = tg3_start_xmit_dma_bug,
12962 .ndo_get_stats = tg3_get_stats,
12963 .ndo_validate_addr = eth_validate_addr,
12964 .ndo_set_multicast_list = tg3_set_rx_mode,
12965 .ndo_set_mac_address = tg3_set_mac_addr,
12966 .ndo_do_ioctl = tg3_ioctl,
12967 .ndo_tx_timeout = tg3_tx_timeout,
12968 .ndo_change_mtu = tg3_change_mtu,
12969 #if TG3_VLAN_TAG_USED
12970 .ndo_vlan_rx_register = tg3_vlan_rx_register,
12971 #endif
12972 #ifdef CONFIG_NET_POLL_CONTROLLER
12973 .ndo_poll_controller = tg3_poll_controller,
12974 #endif
12975 };
12977 static int __devinit tg3_init_one(struct pci_dev *pdev,
12978 const struct pci_device_id *ent)
12979 {
12980 static int tg3_version_printed = 0;
12981 struct net_device *dev;
12982 struct tg3 *tp;
12983 int err, pm_cap;
12984 char str[40];
12985 u64 dma_mask, persist_dma_mask;
12987 if (tg3_version_printed++ == 0)
12988 printk(KERN_INFO "%s", version);
12990 err = pci_enable_device(pdev);
12991 if (err) {
12992 printk(KERN_ERR PFX "Cannot enable PCI device, "
12993 "aborting.\n");
12994 return err;
12995 }
12997 err = pci_request_regions(pdev, DRV_MODULE_NAME);
12998 if (err) {
12999 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
13000 "aborting.\n");
13001 goto err_out_disable_pdev;
13002 }
13004 pci_set_master(pdev);
13006 /* Find power-management capability. */
13007 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
13008 if (pm_cap == 0) {
13009 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
13010 "aborting.\n");
13011 err = -EIO;
13012 goto err_out_free_res;
13013 }
13015 dev = alloc_etherdev(sizeof(*tp));
13016 if (!dev) {
13017 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
13018 err = -ENOMEM;
13019 goto err_out_free_res;
13020 }
13022 SET_NETDEV_DEV(dev, &pdev->dev);
13024 #if TG3_VLAN_TAG_USED
13025 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
13026 #endif
13028 tp = netdev_priv(dev);
13029 tp->pdev = pdev;
13030 tp->dev = dev;
13031 tp->pm_cap = pm_cap;
13032 tp->rx_mode = TG3_DEF_RX_MODE;
13033 tp->tx_mode = TG3_DEF_TX_MODE;
13035 if (tg3_debug > 0)
13036 tp->msg_enable = tg3_debug;
13037 else
13038 tp->msg_enable = TG3_DEF_MSG_ENABLE;
13040 /* The word/byte swap controls here control register access byte
13041 * swapping. DMA data byte swapping is controlled in the GRC_MODE
13042 * setting below.
13043 */
13044 tp->misc_host_ctrl =
13045 MISC_HOST_CTRL_MASK_PCI_INT |
13046 MISC_HOST_CTRL_WORD_SWAP |
13047 MISC_HOST_CTRL_INDIR_ACCESS |
13048 MISC_HOST_CTRL_PCISTATE_RW;
13050 /* The NONFRM (non-frame) byte/word swap controls take effect
13051 * on descriptor entries, anything which isn't packet data.
13053 * The StrongARM chips on the board (one for tx, one for rx)
13054 * are running in big-endian mode.
13055 */
13056 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
13057 GRC_MODE_WSWAP_NONFRM_DATA);
13058 #ifdef __BIG_ENDIAN
13059 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
13060 #endif
13061 spin_lock_init(&tp->lock);
13062 spin_lock_init(&tp->indirect_lock);
13063 INIT_WORK(&tp->reset_task, tg3_reset_task);
13065 tp->regs = pci_ioremap_bar(pdev, BAR_0);
13066 if (!tp->regs) {
13067 printk(KERN_ERR PFX "Cannot map device registers, "
13068 "aborting.\n");
13069 err = -ENOMEM;
13070 goto err_out_free_dev;
13071 }
13073 tg3_init_link_config(tp);
13075 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
13076 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
13077 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
13079 netif_napi_add(dev, &tp->napi, tg3_poll, 64);
13080 dev->ethtool_ops = &tg3_ethtool_ops;
13081 dev->watchdog_timeo = TG3_TX_TIMEOUT;
13082 dev->irq = pdev->irq;
13084 err = tg3_get_invariants(tp);
13085 if (err) {
13086 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
13087 "aborting.\n");
13088 goto err_out_iounmap;
13089 }
13091 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13092 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13093 dev->netdev_ops = &tg3_netdev_ops;
13094 else
13095 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
13098 /* The EPB bridge inside 5714, 5715, and 5780 and any
13099 * device behind the EPB cannot support DMA addresses > 40-bit.
13100 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
13101 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
13102 * do DMA address check in tg3_start_xmit().
13103 */
13104 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
13105 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
13106 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
13107 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
13108 #ifdef CONFIG_HIGHMEM
13109 dma_mask = DMA_64BIT_MASK;
13110 #endif
13111 } else
13112 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
13114 /* Configure DMA attributes. */
13115 if (dma_mask > DMA_32BIT_MASK) {
13116 err = pci_set_dma_mask(pdev, dma_mask);
13117 if (!err) {
13118 dev->features |= NETIF_F_HIGHDMA;
13119 err = pci_set_consistent_dma_mask(pdev,
13120 persist_dma_mask);
13121 if (err < 0) {
13122 printk(KERN_ERR PFX "Unable to obtain 64 bit "
13123 "DMA for consistent allocations\n");
13124 goto err_out_iounmap;
13125 }
13126 }
13127 }
13128 if (err || dma_mask == DMA_32BIT_MASK) {
13129 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
13130 if (err) {
13131 printk(KERN_ERR PFX "No usable DMA configuration, "
13132 "aborting.\n");
13133 goto err_out_iounmap;
13134 }
13135 }
13137 tg3_init_bufmgr_config(tp);
13139 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13140 tp->fw_needed = FIRMWARE_TG3;
13142 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13143 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
13144 }
13145 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13146 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13147 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
13148 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13149 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
13150 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
13151 } else {
13152 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
13153 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13154 tp->fw_needed = FIRMWARE_TG3TSO5;
13155 else
13156 tp->fw_needed = FIRMWARE_TG3TSO;
13157 }
13159 /* TSO is on by default on chips that support hardware TSO.
13160 * Firmware TSO on older chips gives lower performance, so it
13161 * is off by default, but can be enabled using ethtool.
13162 */
13163 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13164 if (dev->features & NETIF_F_IP_CSUM)
13165 dev->features |= NETIF_F_TSO;
13166 if ((dev->features & NETIF_F_IPV6_CSUM) &&
13167 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2))
13168 dev->features |= NETIF_F_TSO6;
13169 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13170 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13171 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
13172 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13173 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
13174 dev->features |= NETIF_F_TSO_ECN;
13175 }
13178 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
13179 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
13180 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
13181 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
13182 tp->rx_pending = 63;
13183 }
13185 err = tg3_get_device_address(tp);
13186 if (err) {
13187 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
13188 "aborting.\n");
13189 goto err_out_fw;
13190 }
13192 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
13193 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
13194 if (!tp->aperegs) {
13195 printk(KERN_ERR PFX "Cannot map APE registers, "
13196 "aborting.\n");
13197 err = -ENOMEM;
13198 goto err_out_fw;
13199 }
13201 tg3_ape_lock_init(tp);
13202 }
13204 /*
13205 * Reset chip in case UNDI or EFI driver did not shutdown
13206 * DMA self test will enable WDMAC and we'll see (spurious)
13207 * pending DMA on the PCI bus at that point.
13208 */
13209 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
13210 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
13211 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
13212 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13213 }
13215 err = tg3_test_dma(tp);
13216 if (err) {
13217 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
13218 goto err_out_apeunmap;
13219 }
13221 /* flow control autonegotiation is default behavior */
13222 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
13223 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13225 tg3_init_coal(tp);
13227 pci_set_drvdata(pdev, dev);
13229 err = register_netdev(dev);
13230 if (err) {
13231 printk(KERN_ERR PFX "Cannot register net device, "
13232 "aborting.\n");
13233 goto err_out_apeunmap;
13234 }
13236 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
13237 dev->name,
13238 tp->board_part_number,
13239 tp->pci_chip_rev_id,
13240 tg3_bus_string(tp, str),
13241 dev->dev_addr);
13243 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
13244 printk(KERN_INFO
13245 "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
13246 tp->dev->name,
13247 tp->mdio_bus->phy_map[PHY_ADDR]->drv->name,
13248 dev_name(&tp->mdio_bus->phy_map[PHY_ADDR]->dev));
13249 else
13250 printk(KERN_INFO
13251 "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
13252 tp->dev->name, tg3_phy_string(tp),
13253 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
13254 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
13255 "10/100/1000Base-T")),
13256 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
13258 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
13259 dev->name,
13260 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
13261 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
13262 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
13263 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
13264 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
13265 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
13266 dev->name, tp->dma_rwctrl,
13267 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
13268 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
13270 return 0;
13272 err_out_apeunmap:
13273 if (tp->aperegs) {
13274 iounmap(tp->aperegs);
13275 tp->aperegs = NULL;
13276 }
13278 err_out_fw:
13279 if (tp->fw)
13280 release_firmware(tp->fw);
13282 err_out_iounmap:
13283 if (tp->regs) {
13284 iounmap(tp->regs);
13285 tp->regs = NULL;
13286 }
13288 err_out_free_dev:
13289 free_netdev(dev);
13291 err_out_free_res:
13292 pci_release_regions(pdev);
13294 err_out_disable_pdev:
13295 pci_disable_device(pdev);
13296 pci_set_drvdata(pdev, NULL);
13297 return err;
13298 }
13300 static void __devexit tg3_remove_one(struct pci_dev *pdev)
13301 {
13302 struct net_device *dev = pci_get_drvdata(pdev);
13304 if (dev) {
13305 struct tg3 *tp = netdev_priv(dev);
13307 if (tp->fw)
13308 release_firmware(tp->fw);
13310 flush_scheduled_work();
13312 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
13313 tg3_phy_fini(tp);
13314 tg3_mdio_fini(tp);
13315 }
13317 unregister_netdev(dev);
13318 if (tp->aperegs) {
13319 iounmap(tp->aperegs);
13320 tp->aperegs = NULL;
13321 }
13322 if (tp->regs) {
13323 iounmap(tp->regs);
13324 tp->regs = NULL;
13325 }
13326 free_netdev(dev);
13327 pci_release_regions(pdev);
13328 pci_disable_device(pdev);
13329 pci_set_drvdata(pdev, NULL);
13330 }
13331 }
13333 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13334 {
13335 struct net_device *dev = pci_get_drvdata(pdev);
13336 struct tg3 *tp = netdev_priv(dev);
13337 pci_power_t target_state;
13338 int err;
13340 /* PCI register 4 needs to be saved whether netif_running() or not.
13341 * MSI address and data need to be saved if using MSI and
13342 * netif_running().
13343 */
13344 pci_save_state(pdev);
13346 if (!netif_running(dev))
13347 return 0;
13349 flush_scheduled_work();
13350 tg3_phy_stop(tp);
13351 tg3_netif_stop(tp);
13353 del_timer_sync(&tp->timer);
13355 tg3_full_lock(tp, 1);
13356 tg3_disable_ints(tp);
13357 tg3_full_unlock(tp);
13359 netif_device_detach(dev);
13361 tg3_full_lock(tp, 0);
13362 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13363 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
13364 tg3_full_unlock(tp);
13366 target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
13368 err = tg3_set_power_state(tp, target_state);
13369 if (err) {
13370 int err2;
13372 tg3_full_lock(tp, 0);
13374 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13375 err2 = tg3_restart_hw(tp, 1);
13376 if (err2)
13377 goto out;
13379 tp->timer.expires = jiffies + tp->timer_offset;
13380 add_timer(&tp->timer);
13382 netif_device_attach(dev);
13383 tg3_netif_start(tp);
13385 out:
13386 tg3_full_unlock(tp);
13388 if (!err2)
13389 tg3_phy_start(tp);
13390 }
13392 return err;
13393 }
13395 static int tg3_resume(struct pci_dev *pdev)
13396 {
13397 struct net_device *dev = pci_get_drvdata(pdev);
13398 struct tg3 *tp = netdev_priv(dev);
13399 int err;
13401 pci_restore_state(tp->pdev);
13403 if (!netif_running(dev))
13404 return 0;
13406 err = tg3_set_power_state(tp, PCI_D0);
13407 if (err)
13408 return err;
13410 netif_device_attach(dev);
13412 tg3_full_lock(tp, 0);
13414 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13415 err = tg3_restart_hw(tp, 1);
13416 if (err)
13417 goto out;
13419 tp->timer.expires = jiffies + tp->timer_offset;
13420 add_timer(&tp->timer);
13422 tg3_netif_start(tp);
13424 out:
13425 tg3_full_unlock(tp);
13427 if (!err)
13428 tg3_phy_start(tp);
13430 return err;
13431 }
13433 static struct pci_driver tg3_driver = {
13434 .name = DRV_MODULE_NAME,
13435 .id_table = tg3_pci_tbl,
13436 .probe = tg3_init_one,
13437 .remove = __devexit_p(tg3_remove_one),
13438 .suspend = tg3_suspend,
13439 .resume = tg3_resume
13440 };
13442 static int __init tg3_init(void)
13443 {
13444 return pci_register_driver(&tg3_driver);
13445 }
13447 static void __exit tg3_cleanup(void)
13448 {
13449 pci_unregister_driver(&tg3_driver);
13450 }
13452 module_init(tg3_init);
13453 module_exit(tg3_cleanup);