/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/* ethtool support for igb */

#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/ethtool.h>
#include <linux/sched.h>

#include "igb.h"
struct igb_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define IGB_STAT(m) FIELD_SIZEOF(struct igb_adapter, m), \
		    offsetof(struct igb_adapter, m)
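
/*
 * Each entry below pairs an ethtool stat name with a field of
 * struct igb_adapter; IGB_STAT() expands to the field's size and offset so
 * that igb_get_ethtool_stats() can copy the value generically, whether the
 * backing field is a u32 or a u64.
 */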
static const struct igb_stats igb_gstrings_stats[] = {
	{ "rx_packets", IGB_STAT(stats.gprc) },
	{ "tx_packets", IGB_STAT(stats.gptc) },
	{ "rx_bytes", IGB_STAT(stats.gorc) },
	{ "tx_bytes", IGB_STAT(stats.gotc) },
	{ "rx_broadcast", IGB_STAT(stats.bprc) },
	{ "tx_broadcast", IGB_STAT(stats.bptc) },
	{ "rx_multicast", IGB_STAT(stats.mprc) },
	{ "tx_multicast", IGB_STAT(stats.mptc) },
	{ "rx_errors", IGB_STAT(net_stats.rx_errors) },
	{ "tx_errors", IGB_STAT(net_stats.tx_errors) },
	{ "tx_dropped", IGB_STAT(net_stats.tx_dropped) },
	{ "multicast", IGB_STAT(stats.mprc) },
	{ "collisions", IGB_STAT(stats.colc) },
	{ "rx_length_errors", IGB_STAT(net_stats.rx_length_errors) },
	{ "rx_over_errors", IGB_STAT(net_stats.rx_over_errors) },
	{ "rx_crc_errors", IGB_STAT(stats.crcerrs) },
	{ "rx_frame_errors", IGB_STAT(net_stats.rx_frame_errors) },
	{ "rx_no_buffer_count", IGB_STAT(stats.rnbc) },
	{ "rx_queue_drop_packet_count", IGB_STAT(net_stats.rx_fifo_errors) },
	{ "rx_missed_errors", IGB_STAT(stats.mpc) },
	{ "tx_aborted_errors", IGB_STAT(stats.ecol) },
	{ "tx_carrier_errors", IGB_STAT(stats.tncrs) },
	{ "tx_fifo_errors", IGB_STAT(net_stats.tx_fifo_errors) },
	{ "tx_heartbeat_errors", IGB_STAT(net_stats.tx_heartbeat_errors) },
	{ "tx_window_errors", IGB_STAT(stats.latecol) },
	{ "tx_abort_late_coll", IGB_STAT(stats.latecol) },
	{ "tx_deferred_ok", IGB_STAT(stats.dc) },
	{ "tx_single_coll_ok", IGB_STAT(stats.scc) },
	{ "tx_multi_coll_ok", IGB_STAT(stats.mcc) },
	{ "tx_timeout_count", IGB_STAT(tx_timeout_count) },
	{ "tx_restart_queue", IGB_STAT(restart_queue) },
	{ "rx_long_length_errors", IGB_STAT(stats.roc) },
	{ "rx_short_length_errors", IGB_STAT(stats.ruc) },
	{ "rx_align_errors", IGB_STAT(stats.algnerrc) },
	{ "tx_tcp_seg_good", IGB_STAT(stats.tsctc) },
	{ "tx_tcp_seg_failed", IGB_STAT(stats.tsctfc) },
	{ "rx_flow_control_xon", IGB_STAT(stats.xonrxc) },
	{ "rx_flow_control_xoff", IGB_STAT(stats.xoffrxc) },
	{ "tx_flow_control_xon", IGB_STAT(stats.xontxc) },
	{ "tx_flow_control_xoff", IGB_STAT(stats.xofftxc) },
	{ "rx_long_byte_count", IGB_STAT(stats.gorc) },
	{ "rx_csum_offload_good", IGB_STAT(hw_csum_good) },
	{ "rx_csum_offload_errors", IGB_STAT(hw_csum_err) },
	{ "tx_dma_out_of_sync", IGB_STAT(stats.doosync) },
	{ "alloc_rx_buff_failed", IGB_STAT(alloc_rx_buff_failed) },
	{ "tx_smbus", IGB_STAT(stats.mgptc) },
	{ "rx_smbus", IGB_STAT(stats.mgprc) },
	{ "dropped_smbus", IGB_STAT(stats.mgpdc) },
};

#define IGB_QUEUE_STATS_LEN \
	(((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues) * \
	  (sizeof(struct igb_rx_queue_stats) / sizeof(u64))) + \
	 ((((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues) * \
	  (sizeof(struct igb_tx_queue_stats) / sizeof(u64))))
#define IGB_GLOBAL_STATS_LEN \
	sizeof(igb_gstrings_stats) / sizeof(struct igb_stats)
#define IGB_STATS_LEN (IGB_GLOBAL_STATS_LEN + IGB_QUEUE_STATS_LEN)
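/*
 * IGB_STATS_LEN is the total u64 count reported for ETH_SS_STATS: the
 * global table above plus, per queue, one value for each u64 member of
 * igb_rx_queue_stats/igb_tx_queue_stats (matching the per-queue
 * packets/bytes/drops strings emitted by igb_get_strings()).
 */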
static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test (offline)", "Eeprom test (offline)",
	"Interrupt test (offline)", "Loopback test (offline)",
	"Link test (on/offline)"
};
#define IGB_TEST_LEN sizeof(igb_gstrings_test) / ETH_GSTRING_LEN

static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (hw->phy.media_type == e1000_media_type_copper) {

		ecmd->supported = (SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_100baseT_Half |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_1000baseT_Full |
				   SUPPORTED_Autoneg |
				   SUPPORTED_TP);
		ecmd->advertising = ADVERTISED_TP;

		if (hw->mac.autoneg == 1) {
			ecmd->advertising |= ADVERTISED_Autoneg;
			/* the e1000 autoneg seems to match ethtool nicely */
			ecmd->advertising |= hw->phy.autoneg_advertised;
		}

		ecmd->port = PORT_TP;
		ecmd->phy_address = hw->phy.addr;
	} else {
		ecmd->supported = (SUPPORTED_1000baseT_Full |
				   SUPPORTED_FIBRE |
				   SUPPORTED_Autoneg);

		ecmd->advertising = (ADVERTISED_1000baseT_Full |
				     ADVERTISED_FIBRE |
				     ADVERTISED_Autoneg);

		ecmd->port = PORT_FIBRE;
	}

	ecmd->transceiver = XCVR_INTERNAL;

	if (rd32(E1000_STATUS) & E1000_STATUS_LU) {

		adapter->hw.mac.ops.get_speed_and_duplex(hw,
						&adapter->link_speed,
						&adapter->link_duplex);
		ecmd->speed = adapter->link_speed;

		/* unfortunately FULL_DUPLEX != DUPLEX_FULL
		 * and HALF_DUPLEX != DUPLEX_HALF */
		if (adapter->link_duplex == FULL_DUPLEX)
			ecmd->duplex = DUPLEX_FULL;
		else
			ecmd->duplex = DUPLEX_HALF;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
	return 0;
}

static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* When SoL/IDER sessions are active, autoneg/speed/duplex
	 * cannot be changed */
	if (igb_check_reset_block(hw)) {
		dev_err(&adapter->pdev->dev, "Cannot change link "
			"characteristics when SoL/IDER is active.\n");
		return -EINVAL;
	}

	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		hw->mac.autoneg = 1;
		hw->phy.autoneg_advertised = ecmd->advertising |
					     ADVERTISED_TP |
					     ADVERTISED_Autoneg;
		ecmd->advertising = hw->phy.autoneg_advertised;
		if (adapter->fc_autoneg)
			hw->fc.requested_mode = e1000_fc_default;
	} else {
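		/* ethtool encodes a forced link setting as the arithmetic
		 * sum of the speed and duplex values (DUPLEX_HALF is 0,
		 * DUPLEX_FULL is 1), so e.g. 100 + DUPLEX_FULL selects the
		 * 100 Mb/s full-duplex case in igb_set_spd_dplx().
		 */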
		if (igb_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
			clear_bit(__IGB_RESETTING, &adapter->state);
			return -EINVAL;
		}
	}

	/* reset the link */
	if (netif_running(adapter->netdev)) {
		igb_down(adapter);
		igb_up(adapter);
	} else
		igb_reset(adapter);

	clear_bit(__IGB_RESETTING, &adapter->state);
	return 0;
}

static void igb_get_pauseparam(struct net_device *netdev,
			       struct ethtool_pauseparam *pause)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	pause->autoneg =
		(adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);

	if (hw->fc.current_mode == e1000_fc_rx_pause)
		pause->rx_pause = 1;
	else if (hw->fc.current_mode == e1000_fc_tx_pause)
		pause->tx_pause = 1;
	else if (hw->fc.current_mode == e1000_fc_full) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}

static int igb_set_pauseparam(struct net_device *netdev,
			      struct ethtool_pauseparam *pause)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int retval = 0;

	adapter->fc_autoneg = pause->autoneg;

	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);

	if (adapter->fc_autoneg == AUTONEG_ENABLE) {
		hw->fc.requested_mode = e1000_fc_default;
		if (netif_running(adapter->netdev)) {
			igb_down(adapter);
			igb_up(adapter);
		} else
			igb_reset(adapter);
	} else {
		if (pause->rx_pause && pause->tx_pause)
			hw->fc.requested_mode = e1000_fc_full;
		else if (pause->rx_pause && !pause->tx_pause)
			hw->fc.requested_mode = e1000_fc_rx_pause;
		else if (!pause->rx_pause && pause->tx_pause)
			hw->fc.requested_mode = e1000_fc_tx_pause;
		else if (!pause->rx_pause && !pause->tx_pause)
			hw->fc.requested_mode = e1000_fc_none;

		hw->fc.current_mode = hw->fc.requested_mode;
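
		/* Apply the forced mode: on copper it appears sufficient to
		 * reprogram the MAC's flow-control bits (igb_force_mac_fc),
		 * while fiber/serdes links are reconfigured through a full
		 * igb_setup_link() instead.
		 */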
		retval = ((hw->phy.media_type == e1000_media_type_copper) ?
			  igb_force_mac_fc(hw) : igb_setup_link(hw));
	}

	clear_bit(__IGB_RESETTING, &adapter->state);
	return retval;
}

static u32 igb_get_rx_csum(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	return !(adapter->flags & IGB_FLAG_RX_CSUM_DISABLED);
}

static int igb_set_rx_csum(struct net_device *netdev, u32 data)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (data)
		adapter->flags &= ~IGB_FLAG_RX_CSUM_DISABLED;
	else
		adapter->flags |= IGB_FLAG_RX_CSUM_DISABLED;

	return 0;
}

static u32 igb_get_tx_csum(struct net_device *netdev)
{
	return (netdev->features & NETIF_F_IP_CSUM) != 0;
}

static int igb_set_tx_csum(struct net_device *netdev, u32 data)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (data) {
		netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
		if (adapter->hw.mac.type == e1000_82576)
			netdev->features |= NETIF_F_SCTP_CSUM;
	} else {
		netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				      NETIF_F_SCTP_CSUM);
	}

	return 0;
}

static int igb_set_tso(struct net_device *netdev, u32 data)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (data) {
		netdev->features |= NETIF_F_TSO;
		netdev->features |= NETIF_F_TSO6;
	} else {
		netdev->features &= ~NETIF_F_TSO;
		netdev->features &= ~NETIF_F_TSO6;
	}

	dev_info(&adapter->pdev->dev, "TSO is %s\n",
		 data ? "Enabled" : "Disabled");
	return 0;
}

static u32 igb_get_msglevel(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	return adapter->msg_enable;
}

static void igb_set_msglevel(struct net_device *netdev, u32 data)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	adapter->msg_enable = data;
}

static int igb_get_regs_len(struct net_device *netdev)
{
#define IGB_REGS_LEN 551
	return IGB_REGS_LEN * sizeof(u32);
}

static void igb_get_regs(struct net_device *netdev,
			 struct ethtool_regs *regs, void *p)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u8 i;

	memset(p, 0, IGB_REGS_LEN * sizeof(u32));

	regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;

	/* General Registers */
	regs_buff[0] = rd32(E1000_CTRL);
	regs_buff[1] = rd32(E1000_STATUS);
	regs_buff[2] = rd32(E1000_CTRL_EXT);
	regs_buff[3] = rd32(E1000_MDIC);
	regs_buff[4] = rd32(E1000_SCTL);
	regs_buff[5] = rd32(E1000_CONNSW);
	regs_buff[6] = rd32(E1000_VET);
	regs_buff[7] = rd32(E1000_LEDCTL);
	regs_buff[8] = rd32(E1000_PBA);
	regs_buff[9] = rd32(E1000_PBS);
	regs_buff[10] = rd32(E1000_FRTIMER);
	regs_buff[11] = rd32(E1000_TCPTIMER);

	/* NVM Register */
	regs_buff[12] = rd32(E1000_EECD);

	/* Interrupt */
	/* Reading EICS for EICR because they read the
	 * same but EICS does not clear on read */
	regs_buff[13] = rd32(E1000_EICS);
	regs_buff[14] = rd32(E1000_EICS);
	regs_buff[15] = rd32(E1000_EIMS);
	regs_buff[16] = rd32(E1000_EIMC);
	regs_buff[17] = rd32(E1000_EIAC);
	regs_buff[18] = rd32(E1000_EIAM);
	/* Reading ICS for ICR because they read the
	 * same but ICS does not clear on read */
	regs_buff[19] = rd32(E1000_ICS);
	regs_buff[20] = rd32(E1000_ICS);
	regs_buff[21] = rd32(E1000_IMS);
	regs_buff[22] = rd32(E1000_IMC);
	regs_buff[23] = rd32(E1000_IAC);
	regs_buff[24] = rd32(E1000_IAM);
	regs_buff[25] = rd32(E1000_IMIRVP);

	/* Flow Control */
	regs_buff[26] = rd32(E1000_FCAL);
	regs_buff[27] = rd32(E1000_FCAH);
	regs_buff[28] = rd32(E1000_FCTTV);
	regs_buff[29] = rd32(E1000_FCRTL);
	regs_buff[30] = rd32(E1000_FCRTH);
	regs_buff[31] = rd32(E1000_FCRTV);

	/* Receive */
	regs_buff[32] = rd32(E1000_RCTL);
	regs_buff[33] = rd32(E1000_RXCSUM);
	regs_buff[34] = rd32(E1000_RLPML);
	regs_buff[35] = rd32(E1000_RFCTL);
	regs_buff[36] = rd32(E1000_MRQC);
	regs_buff[37] = rd32(E1000_VT_CTL);

	/* Transmit */
	regs_buff[38] = rd32(E1000_TCTL);
	regs_buff[39] = rd32(E1000_TCTL_EXT);
	regs_buff[40] = rd32(E1000_TIPG);
	regs_buff[41] = rd32(E1000_DTXCTL);

	/* Wake Up */
	regs_buff[42] = rd32(E1000_WUC);
	regs_buff[43] = rd32(E1000_WUFC);
	regs_buff[44] = rd32(E1000_WUS);
	regs_buff[45] = rd32(E1000_IPAV);
	regs_buff[46] = rd32(E1000_WUPL);

	/* MAC */
	regs_buff[47] = rd32(E1000_PCS_CFG0);
	regs_buff[48] = rd32(E1000_PCS_LCTL);
	regs_buff[49] = rd32(E1000_PCS_LSTAT);
	regs_buff[50] = rd32(E1000_PCS_ANADV);
	regs_buff[51] = rd32(E1000_PCS_LPAB);
	regs_buff[52] = rd32(E1000_PCS_NPTX);
	regs_buff[53] = rd32(E1000_PCS_LPABNP);

	/* Statistics */
	regs_buff[54] = adapter->stats.crcerrs;
	regs_buff[55] = adapter->stats.algnerrc;
	regs_buff[56] = adapter->stats.symerrs;
	regs_buff[57] = adapter->stats.rxerrc;
	regs_buff[58] = adapter->stats.mpc;
	regs_buff[59] = adapter->stats.scc;
	regs_buff[60] = adapter->stats.ecol;
	regs_buff[61] = adapter->stats.mcc;
	regs_buff[62] = adapter->stats.latecol;
	regs_buff[63] = adapter->stats.colc;
	regs_buff[64] = adapter->stats.dc;
	regs_buff[65] = adapter->stats.tncrs;
	regs_buff[66] = adapter->stats.sec;
	regs_buff[67] = adapter->stats.htdpmc;
	regs_buff[68] = adapter->stats.rlec;
	regs_buff[69] = adapter->stats.xonrxc;
	regs_buff[70] = adapter->stats.xontxc;
	regs_buff[71] = adapter->stats.xoffrxc;
	regs_buff[72] = adapter->stats.xofftxc;
	regs_buff[73] = adapter->stats.fcruc;
	regs_buff[74] = adapter->stats.prc64;
	regs_buff[75] = adapter->stats.prc127;
	regs_buff[76] = adapter->stats.prc255;
	regs_buff[77] = adapter->stats.prc511;
	regs_buff[78] = adapter->stats.prc1023;
	regs_buff[79] = adapter->stats.prc1522;
	regs_buff[80] = adapter->stats.gprc;
	regs_buff[81] = adapter->stats.bprc;
	regs_buff[82] = adapter->stats.mprc;
	regs_buff[83] = adapter->stats.gptc;
	regs_buff[84] = adapter->stats.gorc;
	regs_buff[86] = adapter->stats.gotc;
	regs_buff[88] = adapter->stats.rnbc;
	regs_buff[89] = adapter->stats.ruc;
	regs_buff[90] = adapter->stats.rfc;
	regs_buff[91] = adapter->stats.roc;
	regs_buff[92] = adapter->stats.rjc;
	regs_buff[93] = adapter->stats.mgprc;
	regs_buff[94] = adapter->stats.mgpdc;
	regs_buff[95] = adapter->stats.mgptc;
	regs_buff[96] = adapter->stats.tor;
	regs_buff[98] = adapter->stats.tot;
	regs_buff[100] = adapter->stats.tpr;
	regs_buff[101] = adapter->stats.tpt;
	regs_buff[102] = adapter->stats.ptc64;
	regs_buff[103] = adapter->stats.ptc127;
	regs_buff[104] = adapter->stats.ptc255;
	regs_buff[105] = adapter->stats.ptc511;
	regs_buff[106] = adapter->stats.ptc1023;
	regs_buff[107] = adapter->stats.ptc1522;
	regs_buff[108] = adapter->stats.mptc;
	regs_buff[109] = adapter->stats.bptc;
	regs_buff[110] = adapter->stats.tsctc;
	regs_buff[111] = adapter->stats.iac;
	regs_buff[112] = adapter->stats.rpthc;
	regs_buff[113] = adapter->stats.hgptc;
	regs_buff[114] = adapter->stats.hgorc;
	regs_buff[116] = adapter->stats.hgotc;
	regs_buff[118] = adapter->stats.lenerrs;
	regs_buff[119] = adapter->stats.scvpc;
	regs_buff[120] = adapter->stats.hrmpc;

/* These should probably be added to e1000_regs.h instead */
#define E1000_PSRTYPE_REG(_i) (0x05480 + ((_i) * 4))
#define E1000_IP4AT_REG(_i)   (0x05840 + ((_i) * 8))
#define E1000_IP6AT_REG(_i)   (0x05880 + ((_i) * 4))
#define E1000_WUPM_REG(_i)    (0x05A00 + ((_i) * 4))
#define E1000_FFMT_REG(_i)    (0x09000 + ((_i) * 8))
#define E1000_FFVT_REG(_i)    (0x09800 + ((_i) * 8))
#define E1000_FFLT_REG(_i)    (0x05F00 + ((_i) * 8))

	for (i = 0; i < 4; i++)
		regs_buff[121 + i] = rd32(E1000_SRRCTL(i));
	for (i = 0; i < 4; i++)
		regs_buff[125 + i] = rd32(E1000_PSRTYPE_REG(i));
	for (i = 0; i < 4; i++)
		regs_buff[129 + i] = rd32(E1000_RDBAL(i));
	for (i = 0; i < 4; i++)
		regs_buff[133 + i] = rd32(E1000_RDBAH(i));
	for (i = 0; i < 4; i++)
		regs_buff[137 + i] = rd32(E1000_RDLEN(i));
	for (i = 0; i < 4; i++)
		regs_buff[141 + i] = rd32(E1000_RDH(i));
	for (i = 0; i < 4; i++)
		regs_buff[145 + i] = rd32(E1000_RDT(i));
	for (i = 0; i < 4; i++)
		regs_buff[149 + i] = rd32(E1000_RXDCTL(i));

	for (i = 0; i < 10; i++)
		regs_buff[153 + i] = rd32(E1000_EITR(i));
	for (i = 0; i < 8; i++)
		regs_buff[163 + i] = rd32(E1000_IMIR(i));
	for (i = 0; i < 8; i++)
		regs_buff[171 + i] = rd32(E1000_IMIREXT(i));
	for (i = 0; i < 16; i++)
		regs_buff[179 + i] = rd32(E1000_RAL(i));
	for (i = 0; i < 16; i++)
		regs_buff[195 + i] = rd32(E1000_RAH(i));

	for (i = 0; i < 4; i++)
		regs_buff[211 + i] = rd32(E1000_TDBAL(i));
	for (i = 0; i < 4; i++)
		regs_buff[215 + i] = rd32(E1000_TDBAH(i));
	for (i = 0; i < 4; i++)
		regs_buff[219 + i] = rd32(E1000_TDLEN(i));
	for (i = 0; i < 4; i++)
		regs_buff[223 + i] = rd32(E1000_TDH(i));
	for (i = 0; i < 4; i++)
		regs_buff[227 + i] = rd32(E1000_TDT(i));
	for (i = 0; i < 4; i++)
		regs_buff[231 + i] = rd32(E1000_TXDCTL(i));
	for (i = 0; i < 4; i++)
		regs_buff[235 + i] = rd32(E1000_TDWBAL(i));
	for (i = 0; i < 4; i++)
		regs_buff[239 + i] = rd32(E1000_TDWBAH(i));
	for (i = 0; i < 4; i++)
		regs_buff[243 + i] = rd32(E1000_DCA_TXCTRL(i));

	for (i = 0; i < 4; i++)
		regs_buff[247 + i] = rd32(E1000_IP4AT_REG(i));
	for (i = 0; i < 4; i++)
		regs_buff[251 + i] = rd32(E1000_IP6AT_REG(i));
	for (i = 0; i < 32; i++)
		regs_buff[255 + i] = rd32(E1000_WUPM_REG(i));
	for (i = 0; i < 128; i++)
		regs_buff[287 + i] = rd32(E1000_FFMT_REG(i));
	for (i = 0; i < 128; i++)
		regs_buff[415 + i] = rd32(E1000_FFVT_REG(i));
	for (i = 0; i < 4; i++)
		regs_buff[543 + i] = rd32(E1000_FFLT_REG(i));

	regs_buff[547] = rd32(E1000_TDFH);
	regs_buff[548] = rd32(E1000_TDFT);
	regs_buff[549] = rd32(E1000_TDFHS);
	regs_buff[550] = rd32(E1000_TDFPC);
}

static int igb_get_eeprom_len(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	return adapter->hw.nvm.word_size * 2;
}

static int igb_get_eeprom(struct net_device *netdev,
			  struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	int first_word, last_word;
	int ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;

	eeprom_buff = kmalloc(sizeof(u16) *
			      (last_word - first_word + 1), GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	if (hw->nvm.type == e1000_nvm_eeprom_spi)
		ret_val = hw->nvm.ops.read(hw, first_word,
					   last_word - first_word + 1,
					   eeprom_buff);
	else {
		for (i = 0; i < last_word - first_word + 1; i++) {
			ret_val = hw->nvm.ops.read(hw, first_word + i, 1,
						   &eeprom_buff[i]);
			if (ret_val)
				break;
		}
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
	       eeprom->len);
	kfree(eeprom_buff);

	return ret_val;
}

static int igb_set_eeprom(struct net_device *netdev,
			  struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	void *ptr;
	int max_len, first_word, last_word, ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EOPNOTSUPP;

	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
		return -EFAULT;

	max_len = hw->nvm.word_size * 2;

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ptr = (void *)eeprom_buff;

	if (eeprom->offset & 1) {
		/* need read/modify/write of first changed EEPROM word */
		/* only the second byte of the word is being modified */
		ret_val = hw->nvm.ops.read(hw, first_word, 1,
					   &eeprom_buff[0]);
		ptr++;
	}
	if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
		/* need read/modify/write of last changed EEPROM word */
		/* only the first byte of the word is being modified */
		ret_val = hw->nvm.ops.read(hw, last_word, 1,
					   &eeprom_buff[last_word - first_word]);
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(ptr, bytes, eeprom->len);

	for (i = 0; i < last_word - first_word + 1; i++)
		eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);

	ret_val = hw->nvm.ops.write(hw, first_word,
				    last_word - first_word + 1, eeprom_buff);

	/* Update the checksum over the first part of the EEPROM if needed
	 * and flush shadow RAM for 82573 controllers */
	if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG)))
		igb_update_nvm_checksum(hw);

	kfree(eeprom_buff);
	return ret_val;
}

static void igb_get_drvinfo(struct net_device *netdev,
			    struct ethtool_drvinfo *drvinfo)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	char firmware_version[32];
	u16 eeprom_data;

	strncpy(drvinfo->driver, igb_driver_name, 32);
	strncpy(drvinfo->version, igb_driver_version, 32);

	/* EEPROM image version # is reported as firmware version # for
	 * 82575 controllers */
	adapter->hw.nvm.ops.read(&adapter->hw, 5, 1, &eeprom_data);
	sprintf(firmware_version, "%d.%d-%d",
		(eeprom_data & 0xF000) >> 12,
		(eeprom_data & 0x0FF0) >> 4,
		eeprom_data & 0x000F);

	strncpy(drvinfo->fw_version, firmware_version, 32);
	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
	drvinfo->n_stats = IGB_STATS_LEN;
	drvinfo->testinfo_len = IGB_TEST_LEN;
	drvinfo->regdump_len = igb_get_regs_len(netdev);
	drvinfo->eedump_len = igb_get_eeprom_len(netdev);
}

static void igb_get_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	ring->rx_max_pending = IGB_MAX_RXD;
	ring->tx_max_pending = IGB_MAX_TXD;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = adapter->rx_ring_count;
	ring->tx_pending = adapter->tx_ring_count;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static int igb_set_ringparam(struct net_device *netdev,
			     struct ethtool_ringparam *ring)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct igb_ring *temp_ring;
	int i, err = 0;
	u32 new_rx_count, new_tx_count;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;
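
	/* Clamp the requested counts to [IGB_MIN_*, IGB_MAX_*] and round
	 * up to the descriptor multiple the hardware requires
	 * (REQ_*_DESCRIPTOR_MULTIPLE, a multiple of 8 on these MACs).
	 */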
	new_rx_count = max(ring->rx_pending, (u32)IGB_MIN_RXD);
	new_rx_count = min(new_rx_count, (u32)IGB_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);

	new_tx_count = max(ring->tx_pending, (u32)IGB_MIN_TXD);
	new_tx_count = min(new_tx_count, (u32)IGB_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == adapter->tx_ring_count) &&
	    (new_rx_count == adapter->rx_ring_count)) {
		/* nothing to do */
		return 0;
	}

	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);

	if (!netif_running(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	if (adapter->num_tx_queues > adapter->num_rx_queues)
		temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring));
	else
		temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring));

	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	igb_down(adapter);

	/*
	 * We can't just free everything and then setup again,
	 * because the ISRs in MSI-X mode get passed pointers
	 * to the tx and rx ring structs.
	 */
	if (new_tx_count != adapter->tx_ring_count) {
		memcpy(temp_ring, adapter->tx_ring,
		       adapter->num_tx_queues * sizeof(struct igb_ring));

		for (i = 0; i < adapter->num_tx_queues; i++) {
			temp_ring[i].count = new_tx_count;
			err = igb_setup_tx_resources(adapter, &temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					igb_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_tx_queues; i++)
			igb_free_tx_resources(&adapter->tx_ring[i]);

		memcpy(adapter->tx_ring, temp_ring,
		       adapter->num_tx_queues * sizeof(struct igb_ring));

		adapter->tx_ring_count = new_tx_count;
	}

	if (new_rx_count != adapter->rx_ring->count) {
		memcpy(temp_ring, adapter->rx_ring,
		       adapter->num_rx_queues * sizeof(struct igb_ring));

		for (i = 0; i < adapter->num_rx_queues; i++) {
			temp_ring[i].count = new_rx_count;
			err = igb_setup_rx_resources(adapter, &temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					igb_free_rx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_rx_queues; i++)
			igb_free_rx_resources(&adapter->rx_ring[i]);

		memcpy(adapter->rx_ring, temp_ring,
		       adapter->num_rx_queues * sizeof(struct igb_ring));

		adapter->rx_ring_count = new_rx_count;
	}

err_setup:
	igb_up(adapter);
	vfree(temp_ring);
clear_reset:
	clear_bit(__IGB_RESETTING, &adapter->state);
	return err;
}

/* ethtool register test data */
struct igb_reg_test {
	u16 reg;
	u16 reg_offset;
	u16 array_len;
	u16 test_type;
	u32 mask;
	u32 write;
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x100 bytes apart, or in contiguous tables.  We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

#define PATTERN_TEST	1
#define SET_READ_TEST	2
#define WRITE_NO_TEST	3
#define TABLE32_TEST	4
#define TABLE64_TEST_LO	5
#define TABLE64_TEST_HI	6
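
/*
 * Example: { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }
 * pattern-tests RDBAL for queues 0-3 (four registers spaced 0x100 apart),
 * with the read-back compare masked to the bits that are actually writable.
 */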

/* 82576 reg test */
static struct igb_reg_test reg_test_82576[] = {
	{ E1000_FCAL,	   0x100, 1,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_FCAH,	   0x100, 1,  PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
	{ E1000_FCT,	   0x100, 1,  PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
	{ E1000_VET,	   0x100, 1,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_RDBAL(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ E1000_RDBAH(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_RDLEN(0),  0x100, 4,  PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
	{ E1000_RDBAL(4),  0x40,  12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ E1000_RDBAH(4),  0x40,  12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_RDLEN(4),  0x40,  12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
	/* Enable all RX queues before testing. */
	{ E1000_RXDCTL(0), 0x100, 4,  WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
	{ E1000_RXDCTL(4), 0x40,  12, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
	/* RDH is read-only for 82576, only test RDT. */
	{ E1000_RDT(0),	   0x100, 4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ E1000_RDT(4),	   0x40,  12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ E1000_RXDCTL(0), 0x100, 4,  WRITE_NO_TEST, 0, 0 },
	{ E1000_RXDCTL(4), 0x40,  12, WRITE_NO_TEST, 0, 0 },
	{ E1000_FCRTH,	   0x100, 1,  PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
	{ E1000_FCTTV,	   0x100, 1,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ E1000_TIPG,	   0x100, 1,  PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
	{ E1000_TDBAL(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ E1000_TDBAH(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_TDLEN(0),  0x100, 4,  PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
	{ E1000_TDBAL(4),  0x40,  12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ E1000_TDBAH(4),  0x40,  12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_TDLEN(4),  0x40,  12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
	{ E1000_RCTL,	   0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
	{ E1000_RCTL,	   0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
	{ E1000_RCTL,	   0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
	{ E1000_TCTL,	   0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
	{ E1000_RA,	   0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_RA,	   0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
	{ E1000_RA2,	   0, 8,  TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_RA2,	   0, 8,  TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
	{ E1000_MTA,	   0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ 0, 0, 0, 0 }
};

/* 82575 register test */
static struct igb_reg_test reg_test_82575[] = {
	{ E1000_FCAL,	   0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_FCAH,	   0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
	{ E1000_FCT,	   0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
	{ E1000_VET,	   0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_RDBAL(0),  0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ E1000_RDBAH(0),  0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_RDLEN(0),  0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	/* Enable all four RX queues before testing. */
	{ E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
	/* RDH is read-only for 82575, only test RDT. */
	{ E1000_RDT(0),	   0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
	{ E1000_FCRTH,	   0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
	{ E1000_FCTTV,	   0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ E1000_TIPG,	   0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
	{ E1000_TDBAL(0),  0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ E1000_TDBAH(0),  0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_TDLEN(0),  0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ E1000_RCTL,	   0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
	{ E1000_RCTL,	   0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB },
	{ E1000_RCTL,	   0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF },
	{ E1000_TCTL,	   0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
	{ E1000_TXCW,	   0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF },
	{ E1000_RA,	   0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ E1000_RA,	   0, 16, TABLE64_TEST_HI, 0x800FFFFF, 0xFFFFFFFF },
	{ E1000_MTA,	   0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ 0, 0, 0, 0 }
};

static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
			     int reg, u32 mask, u32 write)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pat, val;
	u32 _test[] =
		{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
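	/* 0x5A5A5A5A and 0xA5A5A5A5 toggle alternating bits in opposite
	 * phases; all-zeros and all-ones catch bits stuck low or high.
	 */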
	for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
		wr32(reg, (_test[pat] & write));
		val = rd32(reg);
		if (val != (_test[pat] & write & mask)) {
			dev_err(&adapter->pdev->dev, "pattern test reg %04X "
				"failed: got 0x%08X expected 0x%08X\n",
				reg, val, (_test[pat] & write & mask));
			*data = reg;
			return 1;
		}
	}
	return 0;
}

static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
			      int reg, u32 mask, u32 write)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 val;
	wr32(reg, write & mask);
	val = rd32(reg);
	if ((write & mask) != (val & mask)) {
		dev_err(&adapter->pdev->dev, "set/check reg %04X test failed:"
			" got 0x%08X expected 0x%08X\n", reg,
			(val & mask), (write & mask));
		*data = reg;
		return 1;
	}
	return 0;
}

#define REG_PATTERN_TEST(reg, mask, write) \
	do { \
		if (reg_pattern_test(adapter, data, reg, mask, write)) \
			return 1; \
	} while (0)

#define REG_SET_AND_CHECK(reg, mask, write) \
	do { \
		if (reg_set_and_check(adapter, data, reg, mask, write)) \
			return 1; \
	} while (0)

static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
{
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_test *test;
	u32 value, before, after;
	u32 i, toggle;

	toggle = 0x7FFFF3FF;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		test = reg_test_82576;
		break;
	default:
		test = reg_test_82575;
		break;
	}

	/* Because the status register is such a special case,
	 * we handle it separately from the rest of the register
	 * tests.  Some bits are read-only, some toggle, and some
	 * are writable on newer MACs.
	 */
	before = rd32(E1000_STATUS);
	value = (rd32(E1000_STATUS) & toggle);
	wr32(E1000_STATUS, toggle);
	after = rd32(E1000_STATUS) & toggle;
	if (value != after) {
		dev_err(&adapter->pdev->dev, "failed STATUS register test "
			"got: 0x%08X expected: 0x%08X\n", after, value);
		*data = 1;
		return 1;
	}
	/* restore previous status */
	wr32(E1000_STATUS, before);

	/* Perform the remainder of the register test, looping through
	 * the test table until we either fail or reach the null entry.
	 */
	while (test->reg) {
		for (i = 0; i < test->array_len; i++) {
			switch (test->test_type) {
			case PATTERN_TEST:
				REG_PATTERN_TEST(test->reg +
						 (i * test->reg_offset),
						 test->mask,
						 test->write);
				break;
			case SET_READ_TEST:
				REG_SET_AND_CHECK(test->reg +
						  (i * test->reg_offset),
						  test->mask,
						  test->write);
				break;
			case WRITE_NO_TEST:
				writel(test->write,
				       (adapter->hw.hw_addr + test->reg)
					+ (i * test->reg_offset));
				break;
			case TABLE32_TEST:
				REG_PATTERN_TEST(test->reg + (i * 4),
						 test->mask,
						 test->write);
				break;
			case TABLE64_TEST_LO:
				REG_PATTERN_TEST(test->reg + (i * 8),
						 test->mask,
						 test->write);
				break;
			case TABLE64_TEST_HI:
				REG_PATTERN_TEST((test->reg + 4) + (i * 8),
						 test->mask,
						 test->write);
				break;
			}
		}
		test++;
	}

	*data = 0;
	return 0;
}
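
/*
 * The first NVM_CHECKSUM_REG + 1 NVM words must sum (mod 2^16) to NVM_SUM
 * (0xBABA on this hardware family); the checksum word stored in the NVM is
 * chosen so that the running total comes out to exactly that value.
 */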
static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
{
	u16 temp;
	u16 checksum = 0;
	u16 i;

	*data = 0;
	/* Read and add up the contents of the EEPROM */
	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
		if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp))
		    < 0) {
			*data = 1;
			break;
		}
		checksum += temp;
	}

	/* If Checksum is not Correct return error else test passed */
	if ((checksum != (u16) NVM_SUM) && !(*data))
		*data = 2;

	return *data;
}

static irqreturn_t igb_test_intr(int irq, void *data)
{
	struct net_device *netdev = (struct net_device *) data;
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	adapter->test_icr |= rd32(E1000_ICR);

	return IRQ_HANDLED;
}

static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 mask, ics_mask, i = 0, shared_int = true;
	u32 irq = adapter->pdev->irq;

	*data = 0;

	/* Hook up test interrupt handler just for this test */
	if (adapter->msix_entries)
		/* NOTE: we don't test MSI-X interrupts here, yet */
		return 0;

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		shared_int = false;
		if (request_irq(irq, &igb_test_intr, 0, netdev->name, netdev)) {
			*data = 1;
			return -1;
		}
	} else if (!request_irq(irq, &igb_test_intr, IRQF_PROBE_SHARED,
				netdev->name, netdev)) {
		shared_int = false;
	} else if (request_irq(irq, &igb_test_intr, IRQF_SHARED,
			       netdev->name, netdev)) {
		*data = 1;
		return -1;
	}
	dev_info(&adapter->pdev->dev, "testing %s interrupt\n",
		 (shared_int ? "shared" : "unshared"));

	/* Disable all the interrupts */
	wr32(E1000_IMC, 0xFFFFFFFF);
	msleep(10);

	/* Define all writable bits for ICS */
	switch (hw->mac.type) {
	case e1000_82575:
		ics_mask = 0x37F47EDD;
		break;
	case e1000_82576:
		ics_mask = 0x77D4FBFD;
		break;
	default:
		ics_mask = 0x7FFFFFFF;
		break;
	}

	/* Test each interrupt */
	for (; i < 31; i++) {
		/* Interrupt to test */
		mask = 1 << i;

		if (!(mask & ics_mask))
			continue;

		if (!shared_int) {
			/* Disable the interrupt to be reported in
			 * the cause register and then force the same
			 * interrupt and see if one gets posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;

			/* Flush any pending interrupts */
			wr32(E1000_ICR, ~0);

			wr32(E1000_IMC, mask);
			wr32(E1000_ICS, mask);
			msleep(10);

			if (adapter->test_icr & mask) {
				*data = 3;
				break;
			}
		}

		/* Enable the interrupt to be reported in
		 * the cause register and then force the same
		 * interrupt and see if one gets posted.  If
		 * an interrupt was not posted to the bus, the
		 * test failed.
		 */
		adapter->test_icr = 0;

		/* Flush any pending interrupts */
		wr32(E1000_ICR, ~0);

		wr32(E1000_IMS, mask);
		wr32(E1000_ICS, mask);
		msleep(10);

		if (!(adapter->test_icr & mask)) {
			*data = 4;
			break;
		}

		if (!shared_int) {
			/* Disable the other interrupts to be reported in
			 * the cause register and then force the other
			 * interrupts and see if any get posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;

			/* Flush any pending interrupts */
			wr32(E1000_ICR, ~0);

			wr32(E1000_IMC, ~mask);
			wr32(E1000_ICS, ~mask);
			msleep(10);

			if (adapter->test_icr & mask) {
				*data = 5;
				break;
			}
		}
	}

	/* Disable all the interrupts */
	wr32(E1000_IMC, ~0);
	msleep(10);

	/* Unhook test interrupt handler */
	free_irq(irq, netdev);

	return *data;
}

static void igb_free_desc_rings(struct igb_adapter *adapter)
{
	struct igb_ring *tx_ring = &adapter->test_tx_ring;
	struct igb_ring *rx_ring = &adapter->test_rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int i;

	if (tx_ring->desc && tx_ring->buffer_info) {
		for (i = 0; i < tx_ring->count; i++) {
			struct igb_buffer *buf = &(tx_ring->buffer_info[i]);
			if (buf->dma)
				pci_unmap_single(pdev, buf->dma, buf->length,
						 PCI_DMA_TODEVICE);
			if (buf->skb)
				dev_kfree_skb(buf->skb);
		}
	}

	if (rx_ring->desc && rx_ring->buffer_info) {
		for (i = 0; i < rx_ring->count; i++) {
			struct igb_buffer *buf = &(rx_ring->buffer_info[i]);
			if (buf->dma)
				pci_unmap_single(pdev, buf->dma,
						 IGB_RXBUFFER_2048,
						 PCI_DMA_FROMDEVICE);
			if (buf->skb)
				dev_kfree_skb(buf->skb);
		}
	}

	if (tx_ring->desc) {
		pci_free_consistent(pdev, tx_ring->size, tx_ring->desc,
				    tx_ring->dma);
		tx_ring->desc = NULL;
	}
	if (rx_ring->desc) {
		pci_free_consistent(pdev, rx_ring->size, rx_ring->desc,
				    rx_ring->dma);
		rx_ring->desc = NULL;
	}

	kfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;
	kfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	return;
}

static int igb_setup_desc_rings(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct igb_ring *tx_ring = &adapter->test_tx_ring;
	struct igb_ring *rx_ring = &adapter->test_rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	struct igb_buffer *buffer_info;
	u32 rctl;
	int i, ret_val;

	/* Setup Tx descriptor ring and Tx buffers */

	if (!tx_ring->count)
		tx_ring->count = IGB_DEFAULT_TXD;

	tx_ring->buffer_info = kcalloc(tx_ring->count,
				       sizeof(struct igb_buffer),
				       GFP_KERNEL);
	if (!tx_ring->buffer_info) {
		ret_val = 1;
		goto err_nomem;
	}

	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
					     &tx_ring->dma);
	if (!tx_ring->desc) {
		ret_val = 2;
		goto err_nomem;
	}
	tx_ring->next_to_use = tx_ring->next_to_clean = 0;

	wr32(E1000_TDBAL(0),
	     ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
	wr32(E1000_TDBAH(0), ((u64) tx_ring->dma >> 32));
	wr32(E1000_TDLEN(0),
	     tx_ring->count * sizeof(union e1000_adv_tx_desc));
	wr32(E1000_TDH(0), 0);
	wr32(E1000_TDT(0), 0);
	wr32(E1000_TCTL,
	     E1000_TCTL_PSP | E1000_TCTL_EN |
	     E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
	     E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);

	for (i = 0; i < tx_ring->count; i++) {
		union e1000_adv_tx_desc *tx_desc;
		struct sk_buff *skb;
		unsigned int size = 1024;

		tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
		skb = alloc_skb(size, GFP_KERNEL);
		if (!skb) {
			ret_val = 3;
			goto err_nomem;
		}
		skb_put(skb, size);
		buffer_info = &tx_ring->buffer_info[i];
		buffer_info->skb = skb;
		buffer_info->length = skb->len;
		buffer_info->dma = pci_map_single(pdev, skb->data, skb->len,
						  PCI_DMA_TODEVICE);
		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->read.olinfo_status = cpu_to_le32(skb->len) <<
					      E1000_ADVTXD_PAYLEN_SHIFT;
		tx_desc->read.cmd_type_len = cpu_to_le32(skb->len);
		tx_desc->read.cmd_type_len |= cpu_to_le32(E1000_TXD_CMD_EOP |
							  E1000_TXD_CMD_IFCS |
							  E1000_TXD_CMD_RS |
							  E1000_ADVTXD_DTYP_DATA |
							  E1000_ADVTXD_DCMD_DEXT);
	}

	/* Setup Rx descriptor ring and Rx buffers */

	if (!rx_ring->count)
		rx_ring->count = IGB_DEFAULT_RXD;

	rx_ring->buffer_info = kcalloc(rx_ring->count,
				       sizeof(struct igb_buffer),
				       GFP_KERNEL);
	if (!rx_ring->buffer_info) {
		ret_val = 4;
		goto err_nomem;
	}

	rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
					     &rx_ring->dma);
	if (!rx_ring->desc) {
		ret_val = 5;
		goto err_nomem;
	}
	rx_ring->next_to_use = rx_ring->next_to_clean = 0;

	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	wr32(E1000_RDBAL(0),
	     ((u64) rx_ring->dma & 0xFFFFFFFF));
	wr32(E1000_RDBAH(0),
	     ((u64) rx_ring->dma >> 32));
	wr32(E1000_RDLEN(0), rx_ring->size);
	wr32(E1000_RDH(0), 0);
	wr32(E1000_RDT(0), 0);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
	rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
	       (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
	wr32(E1000_RCTL, rctl);
	wr32(E1000_SRRCTL(0), E1000_SRRCTL_DESCTYPE_ADV_ONEBUF);

	for (i = 0; i < rx_ring->count; i++) {
		union e1000_adv_rx_desc *rx_desc;
		struct sk_buff *skb;

		buffer_info = &rx_ring->buffer_info[i];
		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
		skb = alloc_skb(IGB_RXBUFFER_2048 + NET_IP_ALIGN,
				GFP_KERNEL);
		if (!skb) {
			ret_val = 6;
			goto err_nomem;
		}
		skb_reserve(skb, NET_IP_ALIGN);
		buffer_info->skb = skb;
		buffer_info->dma = pci_map_single(pdev, skb->data,
						  IGB_RXBUFFER_2048,
						  PCI_DMA_FROMDEVICE);
		rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
		memset(skb->data, 0x00, skb->len);
	}

	return 0;

err_nomem:
	igb_free_desc_rings(adapter);
	return ret_val;
}

static void igb_phy_disable_receiver(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Write out to PHY registers 29 and 30 to disable the Receiver. */
	igb_write_phy_reg(hw, 29, 0x001F);
	igb_write_phy_reg(hw, 30, 0x8FFC);
	igb_write_phy_reg(hw, 29, 0x001A);
	igb_write_phy_reg(hw, 30, 0x8FF0);
}
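
/*
 * Registers 29/30 are vendor-specific (undocumented) Marvell M88 debug
 * registers; the magic write sequence above is inherited from the e1000
 * driver's equivalent routine.
 */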

static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_reg = 0;

	hw->mac.autoneg = false;

	if (hw->phy.type == e1000_phy_m88) {
		/* Auto-MDI/MDIX Off */
		igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
		/* reset to update Auto-MDI/MDIX */
		igb_write_phy_reg(hw, PHY_CONTROL, 0x9140);
		/* autoneg off */
		igb_write_phy_reg(hw, PHY_CONTROL, 0x8140);
	}

	ctrl_reg = rd32(E1000_CTRL);

	/* force 1000, set loopback */
	igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);

	/* Now set up the MAC to the same speed/duplex as the PHY. */
	ctrl_reg = rd32(E1000_CTRL);
	ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
	ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
		     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
		     E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */
		     E1000_CTRL_FD |	 /* Force Duplex to FULL */
		     E1000_CTRL_SLU);	 /* Set link up enable bit */

	if (hw->phy.type == e1000_phy_m88)
		ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */

	wr32(E1000_CTRL, ctrl_reg);

	/* Disable the receiver on the PHY so when a cable is plugged in, the
	 * PHY does not begin to autoneg when a cable is reconnected to the NIC.
	 */
	if (hw->phy.type == e1000_phy_m88)
		igb_phy_disable_receiver(adapter);

	udelay(500);

	return 0;
}

static int igb_set_phy_loopback(struct igb_adapter *adapter)
{
	return igb_integrated_phy_loopback(adapter);
}

static int igb_setup_loopback_test(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	if (hw->phy.media_type == e1000_media_type_internal_serdes) {
		reg = rd32(E1000_RCTL);
		reg |= E1000_RCTL_LBM_TCVR;
		wr32(E1000_RCTL, reg);

		wr32(E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK);

		reg = rd32(E1000_CTRL);
		reg &= ~(E1000_CTRL_RFCE |
			 E1000_CTRL_TFCE |
			 E1000_CTRL_LRST);
		reg |= E1000_CTRL_SLU |
		       E1000_CTRL_FD;
		wr32(E1000_CTRL, reg);

		/* Unset switch control to serdes energy detect */
		reg = rd32(E1000_CONNSW);
		reg &= ~E1000_CONNSW_ENRGSRC;
		wr32(E1000_CONNSW, reg);

		/* Set PCS register for forced speed */
		reg = rd32(E1000_PCS_LCTL);
		reg &= ~E1000_PCS_LCTL_AN_ENABLE;   /* Disable Autoneg */
		reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */
		       E1000_PCS_LCTL_FSV_1000 |    /* Force 1000 */
		       E1000_PCS_LCTL_FDV_FULL |    /* SerDes Full duplex */
		       E1000_PCS_LCTL_FSD |         /* Force Speed */
		       E1000_PCS_LCTL_FORCE_LINK;   /* Force Link */
		wr32(E1000_PCS_LCTL, reg);

		return 0;
	} else if (hw->phy.media_type == e1000_media_type_copper) {
		return igb_set_phy_loopback(adapter);
	}

	return 7;
}

static void igb_loopback_cleanup(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;
	u16 phy_reg;

	rctl = rd32(E1000_RCTL);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
	wr32(E1000_RCTL, rctl);

	hw->mac.autoneg = true;
	igb_read_phy_reg(hw, PHY_CONTROL, &phy_reg);
	if (phy_reg & MII_CR_LOOPBACK) {
		phy_reg &= ~MII_CR_LOOPBACK;
		igb_write_phy_reg(hw, PHY_CONTROL, phy_reg);
		igb_phy_sw_reset(hw);
	}
}

static void igb_create_lbtest_frame(struct sk_buff *skb,
				    unsigned int frame_size)
{
	memset(skb->data, 0xFF, frame_size);
	frame_size &= ~1;
	memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
	memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
	memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
}
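
/*
 * The frame built above is all 0xFF in the first half and 0xAA in the
 * second, with single 0xBE/0xAF marker bytes near the midpoint;
 * igb_check_lbtest_frame() below verifies those markers (plus a 0xFF byte
 * near the start) to detect corrupted loopback data.
 */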
static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
{
	frame_size &= ~1;
	if (*(skb->data + 3) == 0xFF)
		if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
		    (*(skb->data + frame_size / 2 + 12) == 0xAF))
			return 0;
	return 13;
}

static int igb_run_loopback_test(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct igb_ring *tx_ring = &adapter->test_tx_ring;
	struct igb_ring *rx_ring = &adapter->test_rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int i, j, k, l, lc, good_cnt;
	int ret_val = 0;
	unsigned long time;

	wr32(E1000_RDT(0), rx_ring->count - 1);

	/* Calculate the loop count based on the largest descriptor ring
	 * The idea is to wrap the largest ring a number of times using 64
	 * send/receive pairs during each loop
	 */
	if (rx_ring->count <= tx_ring->count)
		lc = ((tx_ring->count / 64) * 2) + 1;
	else
		lc = ((rx_ring->count / 64) * 2) + 1;

	k = l = 0;
	for (j = 0; j <= lc; j++) { /* loop count loop */
		for (i = 0; i < 64; i++) { /* send the packets */
			igb_create_lbtest_frame(tx_ring->buffer_info[k].skb,
						1024);
			pci_dma_sync_single_for_device(pdev,
				tx_ring->buffer_info[k].dma,
				tx_ring->buffer_info[k].length,
				PCI_DMA_TODEVICE);
			k++;
			if (k == tx_ring->count)
				k = 0;
		}
		wr32(E1000_TDT(0), k);
		msleep(200);
		time = jiffies; /* set the start time for the receive */
		good_cnt = 0;
		do { /* receive the sent packets */
			pci_dma_sync_single_for_cpu(pdev,
				rx_ring->buffer_info[l].dma,
				IGB_RXBUFFER_2048,
				PCI_DMA_FROMDEVICE);

			ret_val = igb_check_lbtest_frame(
				rx_ring->buffer_info[l].skb, 1024);
			if (!ret_val)
				good_cnt++;
			l++;
			if (l == rx_ring->count)
				l = 0;
			/* time + 20 msecs (200 msecs on 2.4) is more than
			 * enough time to complete the receives, if it's
			 * exceeded, break and error off
			 */
		} while (good_cnt < 64 && jiffies < (time + 20));
		if (good_cnt != 64) {
			ret_val = 13; /* ret_val is the same as mis-compare */
			break;
		}
		if (jiffies >= (time + 20)) {
			ret_val = 14; /* error code for time out error */
			break;
		}
	} /* end loop count loop */
	return ret_val;
}

static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
{
	/* PHY loopback cannot be performed if SoL/IDER
	 * sessions are active */
	if (igb_check_reset_block(&adapter->hw)) {
		dev_err(&adapter->pdev->dev,
			"Cannot do PHY loopback test "
			"when SoL/IDER is active.\n");
		*data = 0;
		goto out;
	}
	*data = igb_setup_desc_rings(adapter);
	if (*data)
		goto out;
	*data = igb_setup_loopback_test(adapter);
	if (*data)
		goto err_loopback;
	*data = igb_run_loopback_test(adapter);
	igb_loopback_cleanup(adapter);

err_loopback:
	igb_free_desc_rings(adapter);
out:
	return *data;
}

static int igb_link_test(struct igb_adapter *adapter, u64 *data)
{
	struct e1000_hw *hw = &adapter->hw;
	*data = 0;
	if (hw->phy.media_type == e1000_media_type_internal_serdes) {
		int i = 0;
		hw->mac.serdes_has_link = false;

		/* On some blade server designs, link establishment
		 * could take as long as 2-3 minutes */
		do {
			hw->mac.ops.check_for_link(&adapter->hw);
			if (hw->mac.serdes_has_link)
				return *data;
			msleep(20);
		} while (i++ < 3750);

		*data = 1;
	} else {
		hw->mac.ops.check_for_link(&adapter->hw);
		if (hw->mac.autoneg)
			msleep(4000);

		if (!(rd32(E1000_STATUS) &
		      E1000_STATUS_LU))
			*data = 1;
	}
	return *data;
}

static void igb_diag_test(struct net_device *netdev,
			  struct ethtool_test *eth_test, u64 *data)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	u16 autoneg_advertised;
	u8 forced_speed_duplex, autoneg;
	bool if_running = netif_running(netdev);

	set_bit(__IGB_TESTING, &adapter->state);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline tests */

		/* save speed, duplex, autoneg settings */
		autoneg_advertised = adapter->hw.phy.autoneg_advertised;
		forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
		autoneg = adapter->hw.mac.autoneg;

		dev_info(&adapter->pdev->dev, "offline testing starting\n");

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result */
		if (igb_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (if_running)
			/* indicate we're in test mode */
			dev_close(netdev);
		else
			igb_reset(adapter);

		if (igb_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		igb_reset(adapter);
		if (igb_eeprom_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		igb_reset(adapter);
		if (igb_intr_test(adapter, &data[2]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		igb_reset(adapter);
		if (igb_loopback_test(adapter, &data[3]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* restore speed, duplex, autoneg settings */
		adapter->hw.phy.autoneg_advertised = autoneg_advertised;
		adapter->hw.mac.forced_speed_duplex = forced_speed_duplex;
		adapter->hw.mac.autoneg = autoneg;

		/* force this routine to wait until autoneg complete/timeout */
		adapter->hw.phy.autoneg_wait_to_complete = true;
		igb_reset(adapter);
		adapter->hw.phy.autoneg_wait_to_complete = false;

		clear_bit(__IGB_TESTING, &adapter->state);
		if (if_running)
			dev_open(netdev);
	} else {
		dev_info(&adapter->pdev->dev, "online testing starting\n");
		/* Online tests */
		if (igb_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Online tests aren't run; pass by default */
		data[0] = 0;
		data[1] = 0;
		data[2] = 0;
		data[3] = 0;

		clear_bit(__IGB_TESTING, &adapter->state);
	}
	msleep_interruptible(4 * 1000);
}

static int igb_wol_exclusion(struct igb_adapter *adapter,
			     struct ethtool_wolinfo *wol)
{
	struct e1000_hw *hw = &adapter->hw;
	int retval = 1; /* fail by default */

	switch (hw->device_id) {
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		/* WoL not supported */
		wol->supported = 0;
		break;
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
		/* Wake events not supported on port B */
		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) {
			wol->supported = 0;
			break;
		}
		/* return success for non excluded adapter ports */
		retval = 0;
		break;
	case E1000_DEV_ID_82576_QUAD_COPPER:
		/* quad port adapters only support WoL on port A */
		if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) {
			wol->supported = 0;
			break;
		}
		/* return success for non excluded adapter ports */
		retval = 0;
		break;
	default:
		/* dual port cards only support WoL on port A from now on
		 * unless it was enabled in the eeprom for port B
		 * so exclude FUNC_1 ports from having WoL enabled */
		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1 &&
		    !adapter->eeprom_wol) {
			wol->supported = 0;
			break;
		}

		retval = 0;
	}

	return retval;
}

static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	wol->supported = WAKE_UCAST | WAKE_MCAST |
			 WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = 0;

	/* this function will set ->supported = 0 and return 1 if wol is not
	 * supported by this hardware */
	if (igb_wol_exclusion(adapter, wol) ||
	    !device_can_wakeup(&adapter->pdev->dev))
		return;

	/* apply any specific unsupported masks here */
	switch (adapter->hw.device_id) {
	default:
		break;
	}

	if (adapter->wol & E1000_WUFC_EX)
		wol->wolopts |= WAKE_UCAST;
	if (adapter->wol & E1000_WUFC_MC)
		wol->wolopts |= WAKE_MCAST;
	if (adapter->wol & E1000_WUFC_BC)
		wol->wolopts |= WAKE_BCAST;
	if (adapter->wol & E1000_WUFC_MAG)
		wol->wolopts |= WAKE_MAGIC;

	return;
}

static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
		return -EOPNOTSUPP;

	if (igb_wol_exclusion(adapter, wol) ||
	    !device_can_wakeup(&adapter->pdev->dev))
		return wol->wolopts ? -EOPNOTSUPP : 0;

	/* these settings will always override what we currently have */
	adapter->wol = 0;

	if (wol->wolopts & WAKE_UCAST)
		adapter->wol |= E1000_WUFC_EX;
	if (wol->wolopts & WAKE_MCAST)
		adapter->wol |= E1000_WUFC_MC;
	if (wol->wolopts & WAKE_BCAST)
		adapter->wol |= E1000_WUFC_BC;
	if (wol->wolopts & WAKE_MAGIC)
		adapter->wol |= E1000_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}

/* bit defines for adapter->led_status */
#define IGB_LED_ON	0

static int igb_phys_id(struct net_device *netdev, u32 data)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
		data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);

	igb_blink_led(hw);
	msleep_interruptible(data * 1000);

	igb_led_off(hw);
	clear_bit(IGB_LED_ON, &adapter->led_status);
	igb_cleanup_led(hw);

	return 0;
}

static int igb_set_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
	    ((ec->rx_coalesce_usecs > 3) &&
	     (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
	    (ec->rx_coalesce_usecs == 2))
		return -EINVAL;
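
	/* Settings of 1 and 3 select the driver's dynamic interrupt
	 * moderation modes and are stored as-is; larger values are an
	 * interval in usecs, kept shifted left by two because the driver
	 * programs EITR in units of roughly 250 ns (usecs * 4).
	 */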
	/* convert to rate of irq's per second */
	if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
		adapter->itr_setting = ec->rx_coalesce_usecs;
		adapter->itr = IGB_START_ITR;
	} else {
		adapter->itr_setting = ec->rx_coalesce_usecs << 2;
		adapter->itr = adapter->itr_setting;
	}

	for (i = 0; i < adapter->num_rx_queues; i++)
		wr32(adapter->rx_ring[i].itr_register, adapter->itr);

	return 0;
}

static int igb_get_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (adapter->itr_setting <= 3)
		ec->rx_coalesce_usecs = adapter->itr_setting;
	else
		ec->rx_coalesce_usecs = adapter->itr_setting >> 2;

	return 0;
}

static int igb_nway_reset(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	if (netif_running(netdev))
		igb_reinit_locked(adapter);
	return 0;
}

static int igb_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return IGB_STATS_LEN;
	case ETH_SS_TEST:
		return IGB_TEST_LEN;
	default:
		return -ENOTSUPP;
	}
}

static void igb_get_ethtool_stats(struct net_device *netdev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	u64 *queue_stat;
	int stat_count_tx = sizeof(struct igb_tx_queue_stats) / sizeof(u64);
	int stat_count_rx = sizeof(struct igb_rx_queue_stats) / sizeof(u64);
	int j;
	int i;

	igb_update_stats(adapter);
	for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
		char *p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
		data[i] = (igb_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	for (j = 0; j < adapter->num_tx_queues; j++) {
		int k;
		queue_stat = (u64 *)&adapter->tx_ring[j].tx_stats;
		for (k = 0; k < stat_count_tx; k++)
			data[i + k] = queue_stat[k];
		i += k;
	}
	for (j = 0; j < adapter->num_rx_queues; j++) {
		int k;
		queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats;
		for (k = 0; k < stat_count_rx; k++)
			data[i + k] = queue_stat[k];
		i += k;
	}
}

static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *igb_gstrings_test,
		       IGB_TEST_LEN * ETH_GSTRING_LEN);
		break;
	case ETH_SS_STATS:
		for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
			memcpy(p, igb_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < adapter->num_tx_queues; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < adapter->num_rx_queues; i++) {
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_drops", i);
			p += ETH_GSTRING_LEN;
		}
		/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
		break;
	}
}

static const struct ethtool_ops igb_ethtool_ops = {
	.get_settings = igb_get_settings,
	.set_settings = igb_set_settings,
	.get_drvinfo = igb_get_drvinfo,
	.get_regs_len = igb_get_regs_len,
	.get_regs = igb_get_regs,
	.get_wol = igb_get_wol,
	.set_wol = igb_set_wol,
	.get_msglevel = igb_get_msglevel,
	.set_msglevel = igb_set_msglevel,
	.nway_reset = igb_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = igb_get_eeprom_len,
	.get_eeprom = igb_get_eeprom,
	.set_eeprom = igb_set_eeprom,
	.get_ringparam = igb_get_ringparam,
	.set_ringparam = igb_set_ringparam,
	.get_pauseparam = igb_get_pauseparam,
	.set_pauseparam = igb_set_pauseparam,
	.get_rx_csum = igb_get_rx_csum,
	.set_rx_csum = igb_set_rx_csum,
	.get_tx_csum = igb_get_tx_csum,
	.set_tx_csum = igb_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = ethtool_op_get_tso,
	.set_tso = igb_set_tso,
	.self_test = igb_diag_test,
	.get_strings = igb_get_strings,
	.phys_id = igb_phys_id,
	.get_sset_count = igb_get_sset_count,
	.get_ethtool_stats = igb_get_ethtool_stats,
	.get_coalesce = igb_get_coalesce,
	.set_coalesce = igb_set_coalesce,
};

void igb_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops);
}