/* Source: drivers/net/ethernet/intel/igb/e1000_82575.c
 * (snapshot at commit "igb: Fix SerDes autoneg flow control",
 * blob fdaaf2709d0ae660480e3c6525e116481a369c4c)
 */
1 /*******************************************************************************
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2012 Intel Corporation.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *******************************************************************************/
/* e1000_82575
 * e1000_82576
 */
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34 #include <linux/types.h>
35 #include <linux/if_ether.h>
37 #include "e1000_mac.h"
38 #include "e1000_82575.h"
39 #include "e1000_i210.h"
41 static s32 igb_get_invariants_82575(struct e1000_hw *);
42 static s32 igb_acquire_phy_82575(struct e1000_hw *);
43 static void igb_release_phy_82575(struct e1000_hw *);
44 static s32 igb_acquire_nvm_82575(struct e1000_hw *);
45 static void igb_release_nvm_82575(struct e1000_hw *);
46 static s32 igb_check_for_link_82575(struct e1000_hw *);
47 static s32 igb_get_cfg_done_82575(struct e1000_hw *);
48 static s32 igb_init_hw_82575(struct e1000_hw *);
49 static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *);
50 static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *);
51 static s32 igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *);
52 static s32 igb_write_phy_reg_82580(struct e1000_hw *, u32, u16);
53 static s32 igb_reset_hw_82575(struct e1000_hw *);
54 static s32 igb_reset_hw_82580(struct e1000_hw *);
55 static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool);
56 static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *, bool);
57 static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *, bool);
58 static s32 igb_setup_copper_link_82575(struct e1000_hw *);
59 static s32 igb_setup_serdes_link_82575(struct e1000_hw *);
60 static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16);
61 static void igb_clear_hw_cntrs_82575(struct e1000_hw *);
62 static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *, u16);
63 static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *,
64 u16 *);
65 static s32 igb_get_phy_id_82575(struct e1000_hw *);
66 static void igb_release_swfw_sync_82575(struct e1000_hw *, u16);
67 static bool igb_sgmii_active_82575(struct e1000_hw *);
68 static s32 igb_reset_init_script_82575(struct e1000_hw *);
69 static s32 igb_read_mac_addr_82575(struct e1000_hw *);
70 static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw);
71 static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw);
72 static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
73 static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw);
74 static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
75 static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
76 static const u16 e1000_82580_rxpbs_table[] =
77 { 36, 72, 144, 1, 2, 4, 8, 16,
78 35, 70, 140 };
79 #define E1000_82580_RXPBS_TABLE_SIZE \
80 (sizeof(e1000_82580_rxpbs_table)/sizeof(u16))
82 /**
83 * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
84 * @hw: pointer to the HW structure
86 * Called to determine if the I2C pins are being used for I2C or as an
87 * external MDIO interface since the two options are mutually exclusive.
88 **/
89 static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
91 u32 reg = 0;
92 bool ext_mdio = false;
94 switch (hw->mac.type) {
95 case e1000_82575:
96 case e1000_82576:
97 reg = rd32(E1000_MDIC);
98 ext_mdio = !!(reg & E1000_MDIC_DEST);
99 break;
100 case e1000_82580:
101 case e1000_i350:
102 case e1000_i210:
103 case e1000_i211:
104 reg = rd32(E1000_MDICNFG);
105 ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
106 break;
107 default:
108 break;
110 return ext_mdio;
113 static s32 igb_get_invariants_82575(struct e1000_hw *hw)
115 struct e1000_phy_info *phy = &hw->phy;
116 struct e1000_nvm_info *nvm = &hw->nvm;
117 struct e1000_mac_info *mac = &hw->mac;
118 struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575;
119 u32 eecd;
120 s32 ret_val;
121 u16 size;
122 u32 ctrl_ext = 0;
124 switch (hw->device_id) {
125 case E1000_DEV_ID_82575EB_COPPER:
126 case E1000_DEV_ID_82575EB_FIBER_SERDES:
127 case E1000_DEV_ID_82575GB_QUAD_COPPER:
128 mac->type = e1000_82575;
129 break;
130 case E1000_DEV_ID_82576:
131 case E1000_DEV_ID_82576_NS:
132 case E1000_DEV_ID_82576_NS_SERDES:
133 case E1000_DEV_ID_82576_FIBER:
134 case E1000_DEV_ID_82576_SERDES:
135 case E1000_DEV_ID_82576_QUAD_COPPER:
136 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
137 case E1000_DEV_ID_82576_SERDES_QUAD:
138 mac->type = e1000_82576;
139 break;
140 case E1000_DEV_ID_82580_COPPER:
141 case E1000_DEV_ID_82580_FIBER:
142 case E1000_DEV_ID_82580_QUAD_FIBER:
143 case E1000_DEV_ID_82580_SERDES:
144 case E1000_DEV_ID_82580_SGMII:
145 case E1000_DEV_ID_82580_COPPER_DUAL:
146 case E1000_DEV_ID_DH89XXCC_SGMII:
147 case E1000_DEV_ID_DH89XXCC_SERDES:
148 case E1000_DEV_ID_DH89XXCC_BACKPLANE:
149 case E1000_DEV_ID_DH89XXCC_SFP:
150 mac->type = e1000_82580;
151 break;
152 case E1000_DEV_ID_I350_COPPER:
153 case E1000_DEV_ID_I350_FIBER:
154 case E1000_DEV_ID_I350_SERDES:
155 case E1000_DEV_ID_I350_SGMII:
156 mac->type = e1000_i350;
157 break;
158 case E1000_DEV_ID_I210_COPPER:
159 case E1000_DEV_ID_I210_COPPER_OEM1:
160 case E1000_DEV_ID_I210_COPPER_IT:
161 case E1000_DEV_ID_I210_FIBER:
162 case E1000_DEV_ID_I210_SERDES:
163 case E1000_DEV_ID_I210_SGMII:
164 mac->type = e1000_i210;
165 break;
166 case E1000_DEV_ID_I211_COPPER:
167 mac->type = e1000_i211;
168 break;
169 default:
170 return -E1000_ERR_MAC_INIT;
171 break;
174 /* Set media type */
176 * The 82575 uses bits 22:23 for link mode. The mode can be changed
177 * based on the EEPROM. We cannot rely upon device ID. There
178 * is no distinguishable difference between fiber and internal
179 * SerDes mode on the 82575. There can be an external PHY attached
180 * on the SGMII interface. For this, we'll set sgmii_active to true.
182 phy->media_type = e1000_media_type_copper;
183 dev_spec->sgmii_active = false;
185 ctrl_ext = rd32(E1000_CTRL_EXT);
186 switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
187 case E1000_CTRL_EXT_LINK_MODE_SGMII:
188 dev_spec->sgmii_active = true;
189 break;
190 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
191 case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
192 hw->phy.media_type = e1000_media_type_internal_serdes;
193 break;
194 default:
195 break;
198 /* Set mta register count */
199 mac->mta_reg_count = 128;
200 /* Set rar entry count */
201 switch (mac->type) {
202 case e1000_82576:
203 mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
204 break;
205 case e1000_82580:
206 mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
207 break;
208 case e1000_i350:
209 mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
210 break;
211 default:
212 mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
213 break;
215 /* reset */
216 if (mac->type >= e1000_82580)
217 mac->ops.reset_hw = igb_reset_hw_82580;
218 else
219 mac->ops.reset_hw = igb_reset_hw_82575;
221 if (mac->type >= e1000_i210) {
222 mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210;
223 mac->ops.release_swfw_sync = igb_release_swfw_sync_i210;
224 } else {
225 mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575;
226 mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
229 /* Set if part includes ASF firmware */
230 mac->asf_firmware_present = true;
231 /* Set if manageability features are enabled. */
232 mac->arc_subsystem_valid =
233 (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
234 ? true : false;
235 /* enable EEE on i350 parts and later parts */
236 if (mac->type >= e1000_i350)
237 dev_spec->eee_disable = false;
238 else
239 dev_spec->eee_disable = true;
240 /* physical interface link setup */
241 mac->ops.setup_physical_interface =
242 (hw->phy.media_type == e1000_media_type_copper)
243 ? igb_setup_copper_link_82575
244 : igb_setup_serdes_link_82575;
246 /* NVM initialization */
247 eecd = rd32(E1000_EECD);
248 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
249 E1000_EECD_SIZE_EX_SHIFT);
252 * Added to a constant, "size" becomes the left-shift value
253 * for setting word_size.
255 size += NVM_WORD_SIZE_BASE_SHIFT;
258 * Check for invalid size
260 if ((hw->mac.type == e1000_82576) && (size > 15)) {
261 pr_notice("The NVM size is not valid, defaulting to 32K\n");
262 size = 15;
265 nvm->word_size = 1 << size;
266 if (hw->mac.type < e1000_i210) {
267 nvm->opcode_bits = 8;
268 nvm->delay_usec = 1;
269 switch (nvm->override) {
270 case e1000_nvm_override_spi_large:
271 nvm->page_size = 32;
272 nvm->address_bits = 16;
273 break;
274 case e1000_nvm_override_spi_small:
275 nvm->page_size = 8;
276 nvm->address_bits = 8;
277 break;
278 default:
279 nvm->page_size = eecd
280 & E1000_EECD_ADDR_BITS ? 32 : 8;
281 nvm->address_bits = eecd
282 & E1000_EECD_ADDR_BITS ? 16 : 8;
283 break;
285 if (nvm->word_size == (1 << 15))
286 nvm->page_size = 128;
288 nvm->type = e1000_nvm_eeprom_spi;
289 } else
290 nvm->type = e1000_nvm_flash_hw;
292 /* NVM Function Pointers */
293 switch (hw->mac.type) {
294 case e1000_82580:
295 nvm->ops.validate = igb_validate_nvm_checksum_82580;
296 nvm->ops.update = igb_update_nvm_checksum_82580;
297 nvm->ops.acquire = igb_acquire_nvm_82575;
298 nvm->ops.release = igb_release_nvm_82575;
299 if (nvm->word_size < (1 << 15))
300 nvm->ops.read = igb_read_nvm_eerd;
301 else
302 nvm->ops.read = igb_read_nvm_spi;
303 nvm->ops.write = igb_write_nvm_spi;
304 break;
305 case e1000_i350:
306 nvm->ops.validate = igb_validate_nvm_checksum_i350;
307 nvm->ops.update = igb_update_nvm_checksum_i350;
308 nvm->ops.acquire = igb_acquire_nvm_82575;
309 nvm->ops.release = igb_release_nvm_82575;
310 if (nvm->word_size < (1 << 15))
311 nvm->ops.read = igb_read_nvm_eerd;
312 else
313 nvm->ops.read = igb_read_nvm_spi;
314 nvm->ops.write = igb_write_nvm_spi;
315 break;
316 case e1000_i210:
317 nvm->ops.validate = igb_validate_nvm_checksum_i210;
318 nvm->ops.update = igb_update_nvm_checksum_i210;
319 nvm->ops.acquire = igb_acquire_nvm_i210;
320 nvm->ops.release = igb_release_nvm_i210;
321 nvm->ops.read = igb_read_nvm_srrd_i210;
322 nvm->ops.write = igb_write_nvm_srwr_i210;
323 nvm->ops.valid_led_default = igb_valid_led_default_i210;
324 break;
325 case e1000_i211:
326 nvm->ops.acquire = igb_acquire_nvm_i210;
327 nvm->ops.release = igb_release_nvm_i210;
328 nvm->ops.read = igb_read_nvm_i211;
329 nvm->ops.valid_led_default = igb_valid_led_default_i210;
330 nvm->ops.validate = NULL;
331 nvm->ops.update = NULL;
332 nvm->ops.write = NULL;
333 break;
334 default:
335 nvm->ops.validate = igb_validate_nvm_checksum;
336 nvm->ops.update = igb_update_nvm_checksum;
337 nvm->ops.acquire = igb_acquire_nvm_82575;
338 nvm->ops.release = igb_release_nvm_82575;
339 if (nvm->word_size < (1 << 15))
340 nvm->ops.read = igb_read_nvm_eerd;
341 else
342 nvm->ops.read = igb_read_nvm_spi;
343 nvm->ops.write = igb_write_nvm_spi;
344 break;
347 /* if part supports SR-IOV then initialize mailbox parameters */
348 switch (mac->type) {
349 case e1000_82576:
350 case e1000_i350:
351 igb_init_mbx_params_pf(hw);
352 break;
353 default:
354 break;
357 /* setup PHY parameters */
358 if (phy->media_type != e1000_media_type_copper) {
359 phy->type = e1000_phy_none;
360 return 0;
363 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
364 phy->reset_delay_us = 100;
366 ctrl_ext = rd32(E1000_CTRL_EXT);
368 /* PHY function pointers */
369 if (igb_sgmii_active_82575(hw)) {
370 phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
371 ctrl_ext |= E1000_CTRL_I2C_ENA;
372 } else {
373 phy->ops.reset = igb_phy_hw_reset;
374 ctrl_ext &= ~E1000_CTRL_I2C_ENA;
377 wr32(E1000_CTRL_EXT, ctrl_ext);
378 igb_reset_mdicnfg_82580(hw);
380 if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
381 phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
382 phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
383 } else if ((hw->mac.type == e1000_82580)
384 || (hw->mac.type == e1000_i350)) {
385 phy->ops.read_reg = igb_read_phy_reg_82580;
386 phy->ops.write_reg = igb_write_phy_reg_82580;
387 } else if (hw->phy.type >= e1000_phy_i210) {
388 phy->ops.read_reg = igb_read_phy_reg_gs40g;
389 phy->ops.write_reg = igb_write_phy_reg_gs40g;
390 } else {
391 phy->ops.read_reg = igb_read_phy_reg_igp;
392 phy->ops.write_reg = igb_write_phy_reg_igp;
395 /* set lan id */
396 hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
397 E1000_STATUS_FUNC_SHIFT;
399 /* Set phy->phy_addr and phy->id. */
400 ret_val = igb_get_phy_id_82575(hw);
401 if (ret_val)
402 return ret_val;
404 /* Verify phy id and set remaining function pointers */
405 switch (phy->id) {
406 case I347AT4_E_PHY_ID:
407 case M88E1112_E_PHY_ID:
408 case M88E1111_I_PHY_ID:
409 phy->type = e1000_phy_m88;
410 phy->ops.get_phy_info = igb_get_phy_info_m88;
412 if (phy->id == I347AT4_E_PHY_ID ||
413 phy->id == M88E1112_E_PHY_ID)
414 phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
415 else
416 phy->ops.get_cable_length = igb_get_cable_length_m88;
418 if (phy->id == I210_I_PHY_ID) {
419 phy->ops.get_cable_length =
420 igb_get_cable_length_m88_gen2;
421 phy->ops.set_d0_lplu_state =
422 igb_set_d0_lplu_state_82580;
423 phy->ops.set_d3_lplu_state =
424 igb_set_d3_lplu_state_82580;
426 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
427 break;
428 case IGP03E1000_E_PHY_ID:
429 phy->type = e1000_phy_igp_3;
430 phy->ops.get_phy_info = igb_get_phy_info_igp;
431 phy->ops.get_cable_length = igb_get_cable_length_igp_2;
432 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
433 phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
434 phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
435 break;
436 case I82580_I_PHY_ID:
437 case I350_I_PHY_ID:
438 phy->type = e1000_phy_82580;
439 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580;
440 phy->ops.get_cable_length = igb_get_cable_length_82580;
441 phy->ops.get_phy_info = igb_get_phy_info_82580;
442 phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
443 phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
444 break;
445 case I210_I_PHY_ID:
446 phy->type = e1000_phy_i210;
447 phy->ops.get_phy_info = igb_get_phy_info_m88;
448 phy->ops.check_polarity = igb_check_polarity_m88;
449 phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
450 phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
451 phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
452 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
453 break;
454 default:
455 return -E1000_ERR_PHY;
458 return 0;
462 * igb_acquire_phy_82575 - Acquire rights to access PHY
463 * @hw: pointer to the HW structure
465 * Acquire access rights to the correct PHY. This is a
466 * function pointer entry point called by the api module.
468 static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
470 u16 mask = E1000_SWFW_PHY0_SM;
472 if (hw->bus.func == E1000_FUNC_1)
473 mask = E1000_SWFW_PHY1_SM;
474 else if (hw->bus.func == E1000_FUNC_2)
475 mask = E1000_SWFW_PHY2_SM;
476 else if (hw->bus.func == E1000_FUNC_3)
477 mask = E1000_SWFW_PHY3_SM;
479 return hw->mac.ops.acquire_swfw_sync(hw, mask);
483 * igb_release_phy_82575 - Release rights to access PHY
484 * @hw: pointer to the HW structure
486 * A wrapper to release access rights to the correct PHY. This is a
487 * function pointer entry point called by the api module.
489 static void igb_release_phy_82575(struct e1000_hw *hw)
491 u16 mask = E1000_SWFW_PHY0_SM;
493 if (hw->bus.func == E1000_FUNC_1)
494 mask = E1000_SWFW_PHY1_SM;
495 else if (hw->bus.func == E1000_FUNC_2)
496 mask = E1000_SWFW_PHY2_SM;
497 else if (hw->bus.func == E1000_FUNC_3)
498 mask = E1000_SWFW_PHY3_SM;
500 hw->mac.ops.release_swfw_sync(hw, mask);
504 * igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
505 * @hw: pointer to the HW structure
506 * @offset: register offset to be read
507 * @data: pointer to the read data
509 * Reads the PHY register at offset using the serial gigabit media independent
510 * interface and stores the retrieved information in data.
512 static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
513 u16 *data)
515 s32 ret_val = -E1000_ERR_PARAM;
517 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
518 hw_dbg("PHY Address %u is out of range\n", offset);
519 goto out;
522 ret_val = hw->phy.ops.acquire(hw);
523 if (ret_val)
524 goto out;
526 ret_val = igb_read_phy_reg_i2c(hw, offset, data);
528 hw->phy.ops.release(hw);
530 out:
531 return ret_val;
535 * igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
536 * @hw: pointer to the HW structure
537 * @offset: register offset to write to
538 * @data: data to write at register offset
540 * Writes the data to PHY register at the offset using the serial gigabit
541 * media independent interface.
543 static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
544 u16 data)
546 s32 ret_val = -E1000_ERR_PARAM;
549 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
550 hw_dbg("PHY Address %d is out of range\n", offset);
551 goto out;
554 ret_val = hw->phy.ops.acquire(hw);
555 if (ret_val)
556 goto out;
558 ret_val = igb_write_phy_reg_i2c(hw, offset, data);
560 hw->phy.ops.release(hw);
562 out:
563 return ret_val;
567 * igb_get_phy_id_82575 - Retrieve PHY addr and id
568 * @hw: pointer to the HW structure
570 * Retrieves the PHY address and ID for both PHY's which do and do not use
571 * sgmi interface.
573 static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
575 struct e1000_phy_info *phy = &hw->phy;
576 s32 ret_val = 0;
577 u16 phy_id;
578 u32 ctrl_ext;
579 u32 mdic;
582 * For SGMII PHYs, we try the list of possible addresses until
583 * we find one that works. For non-SGMII PHYs
584 * (e.g. integrated copper PHYs), an address of 1 should
585 * work. The result of this function should mean phy->phy_addr
586 * and phy->id are set correctly.
588 if (!(igb_sgmii_active_82575(hw))) {
589 phy->addr = 1;
590 ret_val = igb_get_phy_id(hw);
591 goto out;
594 if (igb_sgmii_uses_mdio_82575(hw)) {
595 switch (hw->mac.type) {
596 case e1000_82575:
597 case e1000_82576:
598 mdic = rd32(E1000_MDIC);
599 mdic &= E1000_MDIC_PHY_MASK;
600 phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
601 break;
602 case e1000_82580:
603 case e1000_i350:
604 case e1000_i210:
605 case e1000_i211:
606 mdic = rd32(E1000_MDICNFG);
607 mdic &= E1000_MDICNFG_PHY_MASK;
608 phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
609 break;
610 default:
611 ret_val = -E1000_ERR_PHY;
612 goto out;
613 break;
615 ret_val = igb_get_phy_id(hw);
616 goto out;
619 /* Power on sgmii phy if it is disabled */
620 ctrl_ext = rd32(E1000_CTRL_EXT);
621 wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
622 wrfl();
623 msleep(300);
626 * The address field in the I2CCMD register is 3 bits and 0 is invalid.
627 * Therefore, we need to test 1-7
629 for (phy->addr = 1; phy->addr < 8; phy->addr++) {
630 ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
631 if (ret_val == 0) {
632 hw_dbg("Vendor ID 0x%08X read at address %u\n",
633 phy_id, phy->addr);
635 * At the time of this writing, The M88 part is
636 * the only supported SGMII PHY product.
638 if (phy_id == M88_VENDOR)
639 break;
640 } else {
641 hw_dbg("PHY address %u was unreadable\n", phy->addr);
645 /* A valid PHY type couldn't be found. */
646 if (phy->addr == 8) {
647 phy->addr = 0;
648 ret_val = -E1000_ERR_PHY;
649 goto out;
650 } else {
651 ret_val = igb_get_phy_id(hw);
654 /* restore previous sfp cage power state */
655 wr32(E1000_CTRL_EXT, ctrl_ext);
657 out:
658 return ret_val;
662 * igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset
663 * @hw: pointer to the HW structure
665 * Resets the PHY using the serial gigabit media independent interface.
667 static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
669 s32 ret_val;
672 * This isn't a true "hard" reset, but is the only reset
673 * available to us at this time.
676 hw_dbg("Soft resetting SGMII attached PHY...\n");
679 * SFP documentation requires the following to configure the SPF module
680 * to work on SGMII. No further documentation is given.
682 ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
683 if (ret_val)
684 goto out;
686 ret_val = igb_phy_sw_reset(hw);
688 out:
689 return ret_val;
693 * igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
694 * @hw: pointer to the HW structure
695 * @active: true to enable LPLU, false to disable
697 * Sets the LPLU D0 state according to the active flag. When
698 * activating LPLU this function also disables smart speed
699 * and vice versa. LPLU will not be activated unless the
700 * device autonegotiation advertisement meets standards of
701 * either 10 or 10/100 or 10/100/1000 at all duplexes.
702 * This is a function pointer entry point only called by
703 * PHY setup routines.
705 static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
707 struct e1000_phy_info *phy = &hw->phy;
708 s32 ret_val;
709 u16 data;
711 ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
712 if (ret_val)
713 goto out;
715 if (active) {
716 data |= IGP02E1000_PM_D0_LPLU;
717 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
718 data);
719 if (ret_val)
720 goto out;
722 /* When LPLU is enabled, we should disable SmartSpeed */
723 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
724 &data);
725 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
726 ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
727 data);
728 if (ret_val)
729 goto out;
730 } else {
731 data &= ~IGP02E1000_PM_D0_LPLU;
732 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
733 data);
735 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
736 * during Dx states where the power conservation is most
737 * important. During driver activity we should enable
738 * SmartSpeed, so performance is maintained.
740 if (phy->smart_speed == e1000_smart_speed_on) {
741 ret_val = phy->ops.read_reg(hw,
742 IGP01E1000_PHY_PORT_CONFIG, &data);
743 if (ret_val)
744 goto out;
746 data |= IGP01E1000_PSCFR_SMART_SPEED;
747 ret_val = phy->ops.write_reg(hw,
748 IGP01E1000_PHY_PORT_CONFIG, data);
749 if (ret_val)
750 goto out;
751 } else if (phy->smart_speed == e1000_smart_speed_off) {
752 ret_val = phy->ops.read_reg(hw,
753 IGP01E1000_PHY_PORT_CONFIG, &data);
754 if (ret_val)
755 goto out;
757 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
758 ret_val = phy->ops.write_reg(hw,
759 IGP01E1000_PHY_PORT_CONFIG, data);
760 if (ret_val)
761 goto out;
765 out:
766 return ret_val;
770 * igb_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state
771 * @hw: pointer to the HW structure
772 * @active: true to enable LPLU, false to disable
774 * Sets the LPLU D0 state according to the active flag. When
775 * activating LPLU this function also disables smart speed
776 * and vice versa. LPLU will not be activated unless the
777 * device autonegotiation advertisement meets standards of
778 * either 10 or 10/100 or 10/100/1000 at all duplexes.
779 * This is a function pointer entry point only called by
780 * PHY setup routines.
782 static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
784 struct e1000_phy_info *phy = &hw->phy;
785 s32 ret_val = 0;
786 u16 data;
788 data = rd32(E1000_82580_PHY_POWER_MGMT);
790 if (active) {
791 data |= E1000_82580_PM_D0_LPLU;
793 /* When LPLU is enabled, we should disable SmartSpeed */
794 data &= ~E1000_82580_PM_SPD;
795 } else {
796 data &= ~E1000_82580_PM_D0_LPLU;
799 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
800 * during Dx states where the power conservation is most
801 * important. During driver activity we should enable
802 * SmartSpeed, so performance is maintained.
804 if (phy->smart_speed == e1000_smart_speed_on)
805 data |= E1000_82580_PM_SPD;
806 else if (phy->smart_speed == e1000_smart_speed_off)
807 data &= ~E1000_82580_PM_SPD; }
809 wr32(E1000_82580_PHY_POWER_MGMT, data);
810 return ret_val;
814 * igb_set_d3_lplu_state_82580 - Sets low power link up state for D3
815 * @hw: pointer to the HW structure
816 * @active: boolean used to enable/disable lplu
818 * Success returns 0, Failure returns 1
820 * The low power link up (lplu) state is set to the power management level D3
821 * and SmartSpeed is disabled when active is true, else clear lplu for D3
822 * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
823 * is used during Dx states where the power conservation is most important.
824 * During driver activity, SmartSpeed should be enabled so performance is
825 * maintained.
827 s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
829 struct e1000_phy_info *phy = &hw->phy;
830 s32 ret_val = 0;
831 u16 data;
833 data = rd32(E1000_82580_PHY_POWER_MGMT);
835 if (!active) {
836 data &= ~E1000_82580_PM_D3_LPLU;
838 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
839 * during Dx states where the power conservation is most
840 * important. During driver activity we should enable
841 * SmartSpeed, so performance is maintained.
843 if (phy->smart_speed == e1000_smart_speed_on)
844 data |= E1000_82580_PM_SPD;
845 else if (phy->smart_speed == e1000_smart_speed_off)
846 data &= ~E1000_82580_PM_SPD;
847 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
848 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
849 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
850 data |= E1000_82580_PM_D3_LPLU;
851 /* When LPLU is enabled, we should disable SmartSpeed */
852 data &= ~E1000_82580_PM_SPD;
855 wr32(E1000_82580_PHY_POWER_MGMT, data);
856 return ret_val;
860 * igb_acquire_nvm_82575 - Request for access to EEPROM
861 * @hw: pointer to the HW structure
863 * Acquire the necessary semaphores for exclusive access to the EEPROM.
864 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
865 * Return successful if access grant bit set, else clear the request for
866 * EEPROM access and return -E1000_ERR_NVM (-1).
868 static s32 igb_acquire_nvm_82575(struct e1000_hw *hw)
870 s32 ret_val;
872 ret_val = hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_EEP_SM);
873 if (ret_val)
874 goto out;
876 ret_val = igb_acquire_nvm(hw);
878 if (ret_val)
879 hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
881 out:
882 return ret_val;
886 * igb_release_nvm_82575 - Release exclusive access to EEPROM
887 * @hw: pointer to the HW structure
889 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
890 * then release the semaphores acquired.
892 static void igb_release_nvm_82575(struct e1000_hw *hw)
894 igb_release_nvm(hw);
895 hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
899 * igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
900 * @hw: pointer to the HW structure
901 * @mask: specifies which semaphore to acquire
903 * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
904 * will also specify which port we're acquiring the lock for.
906 static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
908 u32 swfw_sync;
909 u32 swmask = mask;
910 u32 fwmask = mask << 16;
911 s32 ret_val = 0;
912 s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
914 while (i < timeout) {
915 if (igb_get_hw_semaphore(hw)) {
916 ret_val = -E1000_ERR_SWFW_SYNC;
917 goto out;
920 swfw_sync = rd32(E1000_SW_FW_SYNC);
921 if (!(swfw_sync & (fwmask | swmask)))
922 break;
925 * Firmware currently using resource (fwmask)
926 * or other software thread using resource (swmask)
928 igb_put_hw_semaphore(hw);
929 mdelay(5);
930 i++;
933 if (i == timeout) {
934 hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
935 ret_val = -E1000_ERR_SWFW_SYNC;
936 goto out;
939 swfw_sync |= swmask;
940 wr32(E1000_SW_FW_SYNC, swfw_sync);
942 igb_put_hw_semaphore(hw);
944 out:
945 return ret_val;
949 * igb_release_swfw_sync_82575 - Release SW/FW semaphore
950 * @hw: pointer to the HW structure
951 * @mask: specifies which semaphore to acquire
953 * Release the SW/FW semaphore used to access the PHY or NVM. The mask
954 * will also specify which port we're releasing the lock for.
956 static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
958 u32 swfw_sync;
960 while (igb_get_hw_semaphore(hw) != 0);
961 /* Empty */
963 swfw_sync = rd32(E1000_SW_FW_SYNC);
964 swfw_sync &= ~mask;
965 wr32(E1000_SW_FW_SYNC, swfw_sync);
967 igb_put_hw_semaphore(hw);
971 * igb_get_cfg_done_82575 - Read config done bit
972 * @hw: pointer to the HW structure
974 * Read the management control register for the config done bit for
975 * completion status. NOTE: silicon which is EEPROM-less will fail trying
976 * to read the config done bit, so an error is *ONLY* logged and returns
977 * 0. If we were to return with error, EEPROM-less silicon
978 * would not be able to be reset or change link.
980 static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
982 s32 timeout = PHY_CFG_TIMEOUT;
983 s32 ret_val = 0;
984 u32 mask = E1000_NVM_CFG_DONE_PORT_0;
986 if (hw->bus.func == 1)
987 mask = E1000_NVM_CFG_DONE_PORT_1;
988 else if (hw->bus.func == E1000_FUNC_2)
989 mask = E1000_NVM_CFG_DONE_PORT_2;
990 else if (hw->bus.func == E1000_FUNC_3)
991 mask = E1000_NVM_CFG_DONE_PORT_3;
993 while (timeout) {
994 if (rd32(E1000_EEMNGCTL) & mask)
995 break;
996 msleep(1);
997 timeout--;
999 if (!timeout)
1000 hw_dbg("MNG configuration cycle has not completed.\n");
1002 /* If EEPROM is not marked present, init the PHY manually */
1003 if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) &&
1004 (hw->phy.type == e1000_phy_igp_3))
1005 igb_phy_init_script_igp3(hw);
1007 return ret_val;
1011 * igb_check_for_link_82575 - Check for link
1012 * @hw: pointer to the HW structure
1014 * If sgmii is enabled, then use the pcs register to determine link, otherwise
1015 * use the generic interface for determining link.
1017 static s32 igb_check_for_link_82575(struct e1000_hw *hw)
1019 s32 ret_val;
1020 u16 speed, duplex;
1022 if (hw->phy.media_type != e1000_media_type_copper) {
1023 ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
1024 &duplex);
1026 * Use this flag to determine if link needs to be checked or
1027 * not. If we have link clear the flag so that we do not
1028 * continue to check for link.
1030 hw->mac.get_link_status = !hw->mac.serdes_has_link;
1032 /* Configure Flow Control now that Auto-Neg has completed.
1033 * First, we need to restore the desired flow control
1034 * settings because we may have had to re-autoneg with a
1035 * different link partner.
1037 ret_val = igb_config_fc_after_link_up(hw);
1038 if (ret_val)
1039 hw_dbg("Error configuring flow control\n");
1040 } else {
1041 ret_val = igb_check_for_copper_link(hw);
1044 return ret_val;
/**
 *  igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown
 *  @hw: pointer to the HW structure
 *
 *  Re-enables the PCS and powers the laser back on.  Only meaningful on
 *  internal-serdes or SGMII-backed ports; a plain copper port returns
 *  immediately.
 **/
void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
{
	u32 reg;

	/* nothing to do unless the port is serdes or sgmii based */
	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
	    !igb_sgmii_active_82575(hw))
		return;

	/* Enable PCS to turn on link */
	reg = rd32(E1000_PCS_CFG0);
	reg |= E1000_PCS_CFG_PCS_EN;
	wr32(E1000_PCS_CFG0, reg);

	/* Power up the laser (SDP3 data low = laser on) */
	reg = rd32(E1000_CTRL_EXT);
	reg &= ~E1000_CTRL_EXT_SDP3_DATA;
	wr32(E1000_CTRL_EXT, reg);

	/* flush the write to verify completion */
	wrfl();
	msleep(1);
}
1076 * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
1077 * @hw: pointer to the HW structure
1078 * @speed: stores the current speed
1079 * @duplex: stores the current duplex
1081 * Using the physical coding sub-layer (PCS), retrieve the current speed and
1082 * duplex, then store the values in the pointers provided.
1084 static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
1085 u16 *duplex)
1087 struct e1000_mac_info *mac = &hw->mac;
1088 u32 pcs;
1090 /* Set up defaults for the return values of this function */
1091 mac->serdes_has_link = false;
1092 *speed = 0;
1093 *duplex = 0;
1096 * Read the PCS Status register for link state. For non-copper mode,
1097 * the status register is not accurate. The PCS status register is
1098 * used instead.
1100 pcs = rd32(E1000_PCS_LSTAT);
1103 * The link up bit determines when link is up on autoneg. The sync ok
1104 * gets set once both sides sync up and agree upon link. Stable link
1105 * can be determined by checking for both link up and link sync ok
1107 if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
1108 mac->serdes_has_link = true;
1110 /* Detect and store PCS speed */
1111 if (pcs & E1000_PCS_LSTS_SPEED_1000) {
1112 *speed = SPEED_1000;
1113 } else if (pcs & E1000_PCS_LSTS_SPEED_100) {
1114 *speed = SPEED_100;
1115 } else {
1116 *speed = SPEED_10;
1119 /* Detect and store PCS duplex */
1120 if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) {
1121 *duplex = FULL_DUPLEX;
1122 } else {
1123 *duplex = HALF_DUPLEX;
1127 return 0;
/**
 *  igb_shutdown_serdes_link_82575 - Remove link during power down
 *  @hw: pointer to the HW structure
 *
 *  In the case of fiber serdes, shut down optics and PCS on driver unload
 *  when management pass thru is not enabled.
 **/
void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
{
	u32 reg;

	/* NOTE(review): this guard is asymmetric with
	 * igb_power_up_serdes_link_82575(), which returns early when SGMII
	 * is NOT active (!igb_sgmii_active_82575()).  As written, an active
	 * SGMII port skips the shutdown path entirely — confirm whether the
	 * missing '!' is intentional.
	 */
	if (hw->phy.media_type != e1000_media_type_internal_serdes &&
	    igb_sgmii_active_82575(hw))
		return;

	/* leave link up if management (BMC) traffic still needs it */
	if (!igb_enable_mng_pass_thru(hw)) {
		/* Disable PCS to turn off link */
		reg = rd32(E1000_PCS_CFG0);
		reg &= ~E1000_PCS_CFG_PCS_EN;
		wr32(E1000_PCS_CFG0, reg);

		/* shutdown the laser (SDP3 data high = laser off) */
		reg = rd32(E1000_CTRL_EXT);
		reg |= E1000_CTRL_EXT_SDP3_DATA;
		wr32(E1000_CTRL_EXT, reg);

		/* flush the write to verify completion */
		wrfl();
		msleep(1);
	}
}
/**
 *  igb_reset_hw_82575 - Reset hardware
 *  @hw: pointer to the HW structure
 *
 *  This resets the hardware into a known state.  This is a
 *  function pointer entry point called by the api module.
 **/
static s32 igb_reset_hw_82575(struct e1000_hw *hw)
{
	u32 ctrl, icr;
	s32 ret_val;

	/* Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = igb_disable_pcie_master(hw);
	if (ret_val)
		hw_dbg("PCI-E Master disable polling has failed.\n");

	/* set the completion timeout for interface */
	ret_val = igb_set_pcie_completion_timeout(hw);
	if (ret_val) {
		hw_dbg("PCI-E Set completion timeout has failed.\n");
	}

	hw_dbg("Masking off all interrupts\n");
	wr32(E1000_IMC, 0xffffffff);

	/* quiesce RX/TX before issuing the reset */
	wr32(E1000_RCTL, 0);
	wr32(E1000_TCTL, E1000_TCTL_PSP);
	wrfl();

	/* allow outstanding DMA transactions to drain */
	msleep(10);

	ctrl = rd32(E1000_CTRL);

	hw_dbg("Issuing a global reset to MAC\n");
	wr32(E1000_CTRL, ctrl | E1000_CTRL_RST);

	ret_val = igb_get_auto_rd_done(hw);
	if (ret_val) {
		/* When auto config read does not complete, do not
		 * return with an error.  This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		hw_dbg("Auto Read Done did not complete\n");
	}

	/* If EEPROM is not present, run manual init scripts */
	if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
		igb_reset_init_script_82575(hw);

	/* Clear any pending interrupt events; reading ICR drains them */
	wr32(E1000_IMC, 0xffffffff);
	icr = rd32(E1000_ICR);

	/* Install any alternate MAC address into RAR0 */
	ret_val = igb_check_alt_mac_addr(hw);

	return ret_val;
}
/**
 *  igb_init_hw_82575 - Initialize hardware
 *  @hw: pointer to the HW structure
 *
 *  This inits the hardware readying it for operation: ID LED, VLAN filter
 *  table, receive addresses, multicast/unicast hash tables, link setup,
 *  and finally the clear-on-read statistics counters.
 **/
static s32 igb_init_hw_82575(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	u16 i, rar_count = mac->rar_entry_count;

	/* Initialize identification LED */
	ret_val = igb_id_led_init(hw);
	if (ret_val) {
		hw_dbg("Error initializing identification LED\n");
		/* This is not fatal and we should not stop init due to this */
	}

	/* Disabling VLAN filtering */
	hw_dbg("Initializing the IEEE VLAN\n");
	/* i350 uses a different VFTA clearing mechanism than older parts */
	if (hw->mac.type == e1000_i350)
		igb_clear_vfta_i350(hw);
	else
		igb_clear_vfta(hw);

	/* Setup the receive address */
	igb_init_rx_addrs(hw, rar_count);

	/* Zero out the Multicast HASH table */
	hw_dbg("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		array_wr32(E1000_MTA, i, 0);

	/* Zero out the Unicast HASH table */
	hw_dbg("Zeroing the UTA\n");
	for (i = 0; i < mac->uta_reg_count; i++)
		array_wr32(E1000_UTA, i, 0);

	/* Setup link and flow control */
	ret_val = igb_setup_link(hw);

	/* Clear all of the statistics registers (clear on read).  It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	igb_clear_hw_cntrs_82575(hw);

	return ret_val;
}
/**
 *  igb_setup_copper_link_82575 - Configure copper link settings
 *  @hw: pointer to the HW structure
 *
 *  Configures the link for auto-neg or forced speed and duplex.  Then we check
 *  for link, once link is established calls to configure collision distance
 *  and flow control are called.
 **/
static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;
	u32 phpm_reg;

	/* Force link up and let the PHY control speed/duplex (clear the
	 * MAC force-speed/force-duplex bits).
	 */
	ctrl = rd32(E1000_CTRL);
	ctrl |= E1000_CTRL_SLU;
	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	wr32(E1000_CTRL, ctrl);

	/* Clear Go Link Disconnect bit on 82580 and newer devices */
	if (hw->mac.type >= e1000_82580) {
		phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT);
		phpm_reg &= ~E1000_82580_PM_GO_LINKD;
		wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg);
	}

	/* SGMII copper ports also need the PCS/serdes side configured */
	ret_val = igb_setup_serdes_link_82575(hw);
	if (ret_val)
		goto out;

	if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
		/* allow time for SFP cage time to power up phy */
		msleep(300);

		ret_val = hw->phy.ops.reset(hw);
		if (ret_val) {
			hw_dbg("Error resetting the PHY.\n");
			goto out;
		}
	}

	/* PHY-model-specific link configuration */
	switch (hw->phy.type) {
	case e1000_phy_i210:
	case e1000_phy_m88:
		/* I347AT4 and M88E1112 use the gen2 setup routine */
		if (hw->phy.id == I347AT4_E_PHY_ID ||
		    hw->phy.id == M88E1112_E_PHY_ID)
			ret_val = igb_copper_link_setup_m88_gen2(hw);
		else
			ret_val = igb_copper_link_setup_m88(hw);
		break;
	case e1000_phy_igp_3:
		ret_val = igb_copper_link_setup_igp(hw);
		break;
	case e1000_phy_82580:
		ret_val = igb_copper_link_setup_82580(hw);
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		break;
	}

	if (ret_val)
		goto out;

	/* generic copper link setup: waits for link, collision distance,
	 * flow control */
	ret_val = igb_setup_copper_link(hw);
out:
	return ret_val;
}
1347 * igb_setup_serdes_link_82575 - Setup link for serdes
1348 * @hw: pointer to the HW structure
1350 * Configure the physical coding sub-layer (PCS) link. The PCS link is
1351 * used on copper connections where the serialized gigabit media independent
1352 * interface (sgmii), or serdes fiber is being used. Configures the link
1353 * for auto-negotiation or forces speed/duplex.
1355 static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1357 u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
1358 bool pcs_autoneg;
1359 s32 ret_val = E1000_SUCCESS;
1360 u16 data;
1362 if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
1363 !igb_sgmii_active_82575(hw))
1364 return ret_val;
1368 * On the 82575, SerDes loopback mode persists until it is
1369 * explicitly turned off or a power cycle is performed. A read to
1370 * the register does not indicate its status. Therefore, we ensure
1371 * loopback mode is disabled during initialization.
1373 wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
1375 /* power on the sfp cage if present */
1376 ctrl_ext = rd32(E1000_CTRL_EXT);
1377 ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
1378 wr32(E1000_CTRL_EXT, ctrl_ext);
1380 ctrl_reg = rd32(E1000_CTRL);
1381 ctrl_reg |= E1000_CTRL_SLU;
1383 if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
1384 /* set both sw defined pins */
1385 ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
1387 /* Set switch control to serdes energy detect */
1388 reg = rd32(E1000_CONNSW);
1389 reg |= E1000_CONNSW_ENRGSRC;
1390 wr32(E1000_CONNSW, reg);
1393 reg = rd32(E1000_PCS_LCTL);
1395 /* default pcs_autoneg to the same setting as mac autoneg */
1396 pcs_autoneg = hw->mac.autoneg;
1398 switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
1399 case E1000_CTRL_EXT_LINK_MODE_SGMII:
1400 /* sgmii mode lets the phy handle forcing speed/duplex */
1401 pcs_autoneg = true;
1402 /* autoneg time out should be disabled for SGMII mode */
1403 reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
1404 break;
1405 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
1406 /* disable PCS autoneg and support parallel detect only */
1407 pcs_autoneg = false;
1408 default:
1409 if (hw->mac.type == e1000_82575 ||
1410 hw->mac.type == e1000_82576) {
1411 ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
1412 if (ret_val) {
1413 printk(KERN_DEBUG "NVM Read Error\n\n");
1414 return ret_val;
1417 if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT)
1418 pcs_autoneg = false;
1422 * non-SGMII modes only supports a speed of 1000/Full for the
1423 * link so it is best to just force the MAC and let the pcs
1424 * link either autoneg or be forced to 1000/Full
1426 ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
1427 E1000_CTRL_FD | E1000_CTRL_FRCDPX;
1429 /* set speed of 1000/Full if speed/duplex is forced */
1430 reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
1431 break;
1434 wr32(E1000_CTRL, ctrl_reg);
1437 * New SerDes mode allows for forcing speed or autonegotiating speed
1438 * at 1gb. Autoneg should be default set by most drivers. This is the
1439 * mode that will be compatible with older link partners and switches.
1440 * However, both are supported by the hardware and some drivers/tools.
1442 reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
1443 E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
1445 if (pcs_autoneg) {
1446 /* Set PCS register for autoneg */
1447 reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
1448 E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
1450 /* Disable force flow control for autoneg */
1451 reg &= ~E1000_PCS_LCTL_FORCE_FCTRL;
1453 /* Configure flow control advertisement for autoneg */
1454 anadv_reg = rd32(E1000_PCS_ANADV);
1455 anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE);
1456 switch (hw->fc.requested_mode) {
1457 case e1000_fc_full:
1458 case e1000_fc_rx_pause:
1459 anadv_reg |= E1000_TXCW_ASM_DIR;
1460 anadv_reg |= E1000_TXCW_PAUSE;
1461 break;
1462 case e1000_fc_tx_pause:
1463 anadv_reg |= E1000_TXCW_ASM_DIR;
1464 break;
1465 default:
1466 break;
1468 wr32(E1000_PCS_ANADV, anadv_reg);
1470 hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
1471 } else {
1472 /* Set PCS register for forced link */
1473 reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
1475 /* Force flow control for forced link */
1476 reg |= E1000_PCS_LCTL_FORCE_FCTRL;
1478 hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
1481 wr32(E1000_PCS_LCTL, reg);
1483 if (!pcs_autoneg && !igb_sgmii_active_82575(hw))
1484 igb_force_mac_fc(hw);
1486 return ret_val;
1490 * igb_sgmii_active_82575 - Return sgmii state
1491 * @hw: pointer to the HW structure
1493 * 82575 silicon has a serialized gigabit media independent interface (sgmii)
1494 * which can be enabled for use in the embedded applications. Simply
1495 * return the current state of the sgmii interface.
1497 static bool igb_sgmii_active_82575(struct e1000_hw *hw)
1499 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
1500 return dev_spec->sgmii_active;
/**
 *  igb_reset_init_script_82575 - Inits HW defaults after reset
 *  @hw: pointer to the HW structure
 *
 *  Inits recommended HW defaults after a reset when there is no EEPROM
 *  detected.  This is only for the 82575; all other MAC types are a no-op.
 *  The register/value pairs below are the vendor-recommended defaults.
 **/
static s32 igb_reset_init_script_82575(struct e1000_hw *hw)
{
	if (hw->mac.type == e1000_82575) {
		hw_dbg("Running reset init script for 82575\n");
		/* SerDes configuration via SERDESCTRL */
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C);
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78);
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23);
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15);

		/* CCM configuration via CCMCTL register */
		igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00);
		igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00);

		/* PCIe lanes configuration */
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC);
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF);
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05);
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81);

		/* PCIe PLL Configuration */
		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47);
		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00);
		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00);
	}

	return 0;
}
1540 * igb_read_mac_addr_82575 - Read device MAC address
1541 * @hw: pointer to the HW structure
1543 static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
1545 s32 ret_val = 0;
1548 * If there's an alternate MAC address place it in RAR0
1549 * so that it will override the Si installed default perm
1550 * address.
1552 ret_val = igb_check_alt_mac_addr(hw);
1553 if (ret_val)
1554 goto out;
1556 ret_val = igb_read_mac_addr(hw);
1558 out:
1559 return ret_val;
/**
 *  igb_power_down_phy_copper_82575 - Remove link during PHY power down
 *  @hw: pointer to the HW structure
 *
 *  In the case of a PHY power down to save power, or to turn off link during
 *  a driver unload, or wake on lan is not enabled, remove the link.
 **/
void igb_power_down_phy_copper_82575(struct e1000_hw *hw)
{
	/* Keep the PHY powered while the management interface needs it or
	 * PHY resets are blocked.
	 */
	if (igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw))
		return;

	igb_power_down_phy_copper(hw);
}
/**
 *  igb_clear_hw_cntrs_82575 - Clear device specific hardware counters
 *  @hw: pointer to the HW structure
 *
 *  Clears the hardware counters by reading the counter registers.  These
 *  statistics registers are clear-on-read, so discarding the read value
 *  is sufficient to reset them.
 **/
static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
{
	/* clear the counters shared with all e1000-family parts */
	igb_clear_hw_cntrs_base(hw);

	/* packets received/transmitted by size bucket */
	rd32(E1000_PRC64);
	rd32(E1000_PRC127);
	rd32(E1000_PRC255);
	rd32(E1000_PRC511);
	rd32(E1000_PRC1023);
	rd32(E1000_PRC1522);
	rd32(E1000_PTC64);
	rd32(E1000_PTC127);
	rd32(E1000_PTC255);
	rd32(E1000_PTC511);
	rd32(E1000_PTC1023);
	rd32(E1000_PTC1522);

	/* error counters */
	rd32(E1000_ALGNERRC);
	rd32(E1000_RXERRC);
	rd32(E1000_TNCRS);
	rd32(E1000_CEXTERR);
	rd32(E1000_TSCTC);
	rd32(E1000_TSCTFC);

	/* management packet counters */
	rd32(E1000_MGTPRC);
	rd32(E1000_MGTPDC);
	rd32(E1000_MGTPTC);

	/* interrupt assertion/cause counters */
	rd32(E1000_IAC);
	rd32(E1000_ICRXOC);

	rd32(E1000_ICRXPTC);
	rd32(E1000_ICRXATC);
	rd32(E1000_ICTXPTC);
	rd32(E1000_ICTXATC);
	rd32(E1000_ICTXQEC);
	rd32(E1000_ICTXQMTC);
	rd32(E1000_ICRXDMTC);

	/* host/BMC traffic counters */
	rd32(E1000_CBTMPC);
	rd32(E1000_HTDPMC);
	rd32(E1000_CBRMPC);
	rd32(E1000_RPTHC);
	rd32(E1000_HGPTC);
	rd32(E1000_HTCBDPC);
	rd32(E1000_HGORCL);
	rd32(E1000_HGORCH);
	rd32(E1000_HGOTCL);
	rd32(E1000_HGOTCH);
	rd32(E1000_LENERRS);

	/* This register should not be read in copper configurations */
	if (hw->phy.media_type == e1000_media_type_internal_serdes ||
	    igb_sgmii_active_82575(hw))
		rd32(E1000_SCVPC);
}
/**
 *  igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable
 *  @hw: pointer to the HW structure
 *
 *  After rx enable if managability is enabled then there is likely some
 *  bad data at the start of the fifo and possibly in the DMA fifo.  This
 *  function clears the fifos and flushes any packets that came in as rx was
 *  being enabled.  Only required on 82575 parts with manageability receive
 *  (RCV_TCO) enabled.
 **/
void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
{
	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
	int i, ms_wait;

	/* workaround applies only to 82575 with manageability RX enabled */
	if (hw->mac.type != e1000_82575 ||
	    !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN))
		return;

	/* Disable all RX queues, saving their state for restore later */
	for (i = 0; i < 4; i++) {
		rxdctl[i] = rd32(E1000_RXDCTL(i));
		wr32(E1000_RXDCTL(i),
		     rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
	}
	/* Poll all queues to verify they have shut down */
	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
		msleep(1);
		rx_enabled = 0;
		for (i = 0; i < 4; i++)
			rx_enabled |= rd32(E1000_RXDCTL(i));
		if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
			break;
	}

	if (ms_wait == 10)
		hw_dbg("Queue disable timed out after 10ms\n");

	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
	 * incoming packets are rejected.  Set enable and wait 2ms so that
	 * any packet that was coming in as RCTL.EN was set is flushed
	 */
	rfctl = rd32(E1000_RFCTL);
	wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);

	rlpml = rd32(E1000_RLPML);
	wr32(E1000_RLPML, 0);

	rctl = rd32(E1000_RCTL);
	temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
	temp_rctl |= E1000_RCTL_LPE;

	wr32(E1000_RCTL, temp_rctl);
	wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
	wrfl();
	msleep(2);

	/* Enable RX queues that were previously enabled and restore our
	 * previous state
	 */
	for (i = 0; i < 4; i++)
		wr32(E1000_RXDCTL(i), rxdctl[i]);
	wr32(E1000_RCTL, rctl);
	wrfl();

	wr32(E1000_RLPML, rlpml);
	wr32(E1000_RFCTL, rfctl);

	/* Flush receive errors generated by workaround */
	rd32(E1000_ROC);
	rd32(E1000_RNBC);
	rd32(E1000_MPC);
}
/**
 *  igb_set_pcie_completion_timeout - set pci-e completion timeout
 *  @hw: pointer to the HW structure
 *
 *  The defaults for 82575 and 82576 should be in the range of 50us to 50ms,
 *  however the hardware default for these parts is 500us to 1ms which is less
 *  than the 10ms recommended by the pci-e spec.  To address this we need to
 *  increase the value to either 10ms to 200ms for capability version 1 config,
 *  or 16ms to 55ms for version 2.
 *
 *  Note: GCR is written back on every exit path (the out label), which also
 *  clears the completion-timeout-resend bit.
 **/
static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
{
	u32 gcr = rd32(E1000_GCR);
	s32 ret_val = 0;
	u16 pcie_devctl2;

	/* only take action if timeout value is defaulted to 0 */
	if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
		goto out;

	/* if capababilities version is type 1 we can write the
	 * timeout of 10ms to 200ms through the GCR register
	 */
	if (!(gcr & E1000_GCR_CAP_VER2)) {
		gcr |= E1000_GCR_CMPL_TMOUT_10ms;
		goto out;
	}

	/* for version 2 capabilities we need to write the config space
	 * directly in order to set the completion timeout value for
	 * 16ms to 55ms
	 */
	ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
					&pcie_devctl2);
	if (ret_val)
		goto out;

	pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;

	ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
					 &pcie_devctl2);
out:
	/* disable completion timeout resend */
	gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;

	wr32(E1000_GCR, gcr);
	return ret_val;
}
1764 * igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
1765 * @hw: pointer to the hardware struct
1766 * @enable: state to enter, either enabled or disabled
1767 * @pf: Physical Function pool - do not set anti-spoofing for the PF
1769 * enables/disables L2 switch anti-spoofing functionality.
1771 void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
1773 u32 dtxswc;
1775 switch (hw->mac.type) {
1776 case e1000_82576:
1777 case e1000_i350:
1778 dtxswc = rd32(E1000_DTXSWC);
1779 if (enable) {
1780 dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK |
1781 E1000_DTXSWC_VLAN_SPOOF_MASK);
1782 /* The PF can spoof - it has to in order to
1783 * support emulation mode NICs */
1784 dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
1785 } else {
1786 dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
1787 E1000_DTXSWC_VLAN_SPOOF_MASK);
1789 wr32(E1000_DTXSWC, dtxswc);
1790 break;
1791 default:
1792 break;
1797 * igb_vmdq_set_loopback_pf - enable or disable vmdq loopback
1798 * @hw: pointer to the hardware struct
1799 * @enable: state to enter, either enabled or disabled
1801 * enables/disables L2 switch loopback functionality.
1803 void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
1805 u32 dtxswc;
1807 switch (hw->mac.type) {
1808 case e1000_82576:
1809 dtxswc = rd32(E1000_DTXSWC);
1810 if (enable)
1811 dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1812 else
1813 dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1814 wr32(E1000_DTXSWC, dtxswc);
1815 break;
1816 case e1000_i350:
1817 dtxswc = rd32(E1000_TXSWC);
1818 if (enable)
1819 dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1820 else
1821 dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1822 wr32(E1000_TXSWC, dtxswc);
1823 break;
1824 default:
1825 /* Currently no other hardware supports loopback */
1826 break;
1833 * igb_vmdq_set_replication_pf - enable or disable vmdq replication
1834 * @hw: pointer to the hardware struct
1835 * @enable: state to enter, either enabled or disabled
1837 * enables/disables replication of packets across multiple pools.
1839 void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
1841 u32 vt_ctl = rd32(E1000_VT_CTL);
1843 if (enable)
1844 vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
1845 else
1846 vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
1848 wr32(E1000_VT_CTL, vt_ctl);
1852 * igb_read_phy_reg_82580 - Read 82580 MDI control register
1853 * @hw: pointer to the HW structure
1854 * @offset: register offset to be read
1855 * @data: pointer to the read data
1857 * Reads the MDI control register in the PHY at offset and stores the
1858 * information read to data.
1860 static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
1862 s32 ret_val;
1865 ret_val = hw->phy.ops.acquire(hw);
1866 if (ret_val)
1867 goto out;
1869 ret_val = igb_read_phy_reg_mdic(hw, offset, data);
1871 hw->phy.ops.release(hw);
1873 out:
1874 return ret_val;
1878 * igb_write_phy_reg_82580 - Write 82580 MDI control register
1879 * @hw: pointer to the HW structure
1880 * @offset: register offset to write to
1881 * @data: data to write to register at offset
1883 * Writes data to MDI control register in the PHY at offset.
1885 static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
1887 s32 ret_val;
1890 ret_val = hw->phy.ops.acquire(hw);
1891 if (ret_val)
1892 goto out;
1894 ret_val = igb_write_phy_reg_mdic(hw, offset, data);
1896 hw->phy.ops.release(hw);
1898 out:
1899 return ret_val;
/**
 *  igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
 *  @hw: pointer to the HW structure
 *
 *  This resets the the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
 *  the values found in the EEPROM.  This addresses an issue in which these
 *  bits are not restored from EEPROM after reset.  Applies only to 82580
 *  parts with SGMII active.
 **/
static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u32 mdicnfg;
	u16 nvm_data = 0;

	if (hw->mac.type != e1000_82580)
		goto out;
	if (!igb_sgmii_active_82575(hw))
		goto out;

	/* read the per-port INIT_CONTROL3 word for this LAN function */
	ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
				   NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
				   &nvm_data);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	mdicnfg = rd32(E1000_MDICNFG);
	/* NOTE(review): the EXT_MDIO/COM_MDIO bits are only ever set here,
	 * never cleared first — a stale set bit would survive when the
	 * EEPROM word has the corresponding bit clear.  Confirm whether a
	 * mask-clear is needed before the conditional sets.
	 */
	if (nvm_data & NVM_WORD24_EXT_MDIO)
		mdicnfg |= E1000_MDICNFG_EXT_MDIO;
	if (nvm_data & NVM_WORD24_COM_MDIO)
		mdicnfg |= E1000_MDICNFG_COM_MDIO;
	wr32(E1000_MDICNFG, mdicnfg);
out:
	return ret_val;
}
/**
 *  igb_reset_hw_82580 - Reset hardware
 *  @hw: pointer to the HW structure
 *
 *  This resets function or entire device (all ports, etc.)
 *  to a known state.  A global device reset is used only when requested via
 *  dev_spec and the SW mailbox semaphore can be acquired; otherwise a
 *  per-function reset is issued.
 **/
static s32 igb_reset_hw_82580(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	/* BH SW mailbox bit in SW_FW_SYNC */
	u16 swmbsw_mask = E1000_SW_SYNCH_MB;
	u32 ctrl, icr;
	bool global_device_reset = hw->dev_spec._82575.global_device_reset;

	/* consume the request; it is a one-shot flag */
	hw->dev_spec._82575.global_device_reset = false;

	/* due to hw errata, global device reset doesn't always
	 * work on 82580
	 */
	if (hw->mac.type == e1000_82580)
		global_device_reset = false;

	/* Get current control state. */
	ctrl = rd32(E1000_CTRL);

	/* Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = igb_disable_pcie_master(hw);
	if (ret_val)
		hw_dbg("PCI-E Master disable polling has failed.\n");

	hw_dbg("Masking off all interrupts\n");
	wr32(E1000_IMC, 0xffffffff);
	wr32(E1000_RCTL, 0);
	wr32(E1000_TCTL, E1000_TCTL_PSP);
	wrfl();

	/* allow outstanding DMA transactions to drain */
	msleep(10);

	/* Determine whether or not a global dev reset is requested;
	 * fall back to a function reset if the semaphore is busy
	 */
	if (global_device_reset &&
	    hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask))
		global_device_reset = false;

	/* only issue DEV_RST if another function has not already done so */
	if (global_device_reset &&
	    !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET))
		ctrl |= E1000_CTRL_DEV_RST;
	else
		ctrl |= E1000_CTRL_RST;

	wr32(E1000_CTRL, ctrl);
	wrfl();

	/* Add delay to insure DEV_RST has time to complete */
	if (global_device_reset)
		msleep(5);

	ret_val = igb_get_auto_rd_done(hw);
	if (ret_val) {
		/* When auto config read does not complete, do not
		 * return with an error.  This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		hw_dbg("Auto Read Done did not complete\n");
	}

	/* If EEPROM is not present, run manual init scripts */
	if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
		igb_reset_init_script_82575(hw);

	/* clear global device reset status bit (write-1-to-clear) */
	wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET);

	/* Clear any pending interrupt events; reading ICR drains them */
	wr32(E1000_IMC, 0xffffffff);
	icr = rd32(E1000_ICR);

	/* restore MDICNFG bits that the reset does not reload from EEPROM */
	ret_val = igb_reset_mdicnfg_82580(hw);
	if (ret_val)
		hw_dbg("Could not reset MDICNFG based on EEPROM\n");

	/* Install any alternate MAC address into RAR0 */
	ret_val = igb_check_alt_mac_addr(hw);

	/* Release semaphore */
	if (global_device_reset)
		hw->mac.ops.release_swfw_sync(hw, swmbsw_mask);

	return ret_val;
}
2036 * igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size
2037 * @data: data received by reading RXPBS register
2039 * The 82580 uses a table based approach for packet buffer allocation sizes.
2040 * This function converts the retrieved value into the correct table value
2041 * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7
2042 * 0x0 36 72 144 1 2 4 8 16
2043 * 0x8 35 70 140 rsv rsv rsv rsv rsv
2045 u16 igb_rxpbs_adjust_82580(u32 data)
2047 u16 ret_val = 0;
2049 if (data < E1000_82580_RXPBS_TABLE_SIZE)
2050 ret_val = e1000_82580_rxpbs_table[data];
2052 return ret_val;
2056 * igb_validate_nvm_checksum_with_offset - Validate EEPROM
2057 * checksum
2058 * @hw: pointer to the HW structure
2059 * @offset: offset in words of the checksum protected region
2061 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
2062 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
2064 static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
2065 u16 offset)
2067 s32 ret_val = 0;
2068 u16 checksum = 0;
2069 u16 i, nvm_data;
2071 for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
2072 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
2073 if (ret_val) {
2074 hw_dbg("NVM Read Error\n");
2075 goto out;
2077 checksum += nvm_data;
2080 if (checksum != (u16) NVM_SUM) {
2081 hw_dbg("NVM Checksum Invalid\n");
2082 ret_val = -E1000_ERR_NVM;
2083 goto out;
2086 out:
2087 return ret_val;
2091 * igb_update_nvm_checksum_with_offset - Update EEPROM
2092 * checksum
2093 * @hw: pointer to the HW structure
2094 * @offset: offset in words of the checksum protected region
2096 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
2097 * up to the checksum. Then calculates the EEPROM checksum and writes the
2098 * value to the EEPROM.
2100 static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
2102 s32 ret_val;
2103 u16 checksum = 0;
2104 u16 i, nvm_data;
2106 for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
2107 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
2108 if (ret_val) {
2109 hw_dbg("NVM Read Error while updating checksum.\n");
2110 goto out;
2112 checksum += nvm_data;
2114 checksum = (u16) NVM_SUM - checksum;
2115 ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
2116 &checksum);
2117 if (ret_val)
2118 hw_dbg("NVM Write Error while updating checksum.\n");
2120 out:
2121 return ret_val;
2125 * igb_validate_nvm_checksum_82580 - Validate EEPROM checksum
2126 * @hw: pointer to the HW structure
2128 * Calculates the EEPROM section checksum by reading/adding each word of
2129 * the EEPROM and then verifies that the sum of the EEPROM is
2130 * equal to 0xBABA.
2132 static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw)
2134 s32 ret_val = 0;
2135 u16 eeprom_regions_count = 1;
2136 u16 j, nvm_data;
2137 u16 nvm_offset;
2139 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
2140 if (ret_val) {
2141 hw_dbg("NVM Read Error\n");
2142 goto out;
2145 if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
2146 /* if checksums compatibility bit is set validate checksums
2147 * for all 4 ports. */
2148 eeprom_regions_count = 4;
2151 for (j = 0; j < eeprom_regions_count; j++) {
2152 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2153 ret_val = igb_validate_nvm_checksum_with_offset(hw,
2154 nvm_offset);
2155 if (ret_val != 0)
2156 goto out;
2159 out:
2160 return ret_val;
2164 * igb_update_nvm_checksum_82580 - Update EEPROM checksum
2165 * @hw: pointer to the HW structure
2167 * Updates the EEPROM section checksums for all 4 ports by reading/adding
2168 * each word of the EEPROM up to the checksum. Then calculates the EEPROM
2169 * checksum and writes the value to the EEPROM.
2171 static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
2173 s32 ret_val;
2174 u16 j, nvm_data;
2175 u16 nvm_offset;
2177 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
2178 if (ret_val) {
2179 hw_dbg("NVM Read Error while updating checksum"
2180 " compatibility bit.\n");
2181 goto out;
2184 if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) {
2185 /* set compatibility bit to validate checksums appropriately */
2186 nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
2187 ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
2188 &nvm_data);
2189 if (ret_val) {
2190 hw_dbg("NVM Write Error while updating checksum"
2191 " compatibility bit.\n");
2192 goto out;
2196 for (j = 0; j < 4; j++) {
2197 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2198 ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
2199 if (ret_val)
2200 goto out;
2203 out:
2204 return ret_val;
2208 * igb_validate_nvm_checksum_i350 - Validate EEPROM checksum
2209 * @hw: pointer to the HW structure
2211 * Calculates the EEPROM section checksum by reading/adding each word of
2212 * the EEPROM and then verifies that the sum of the EEPROM is
2213 * equal to 0xBABA.
2215 static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw)
2217 s32 ret_val = 0;
2218 u16 j;
2219 u16 nvm_offset;
2221 for (j = 0; j < 4; j++) {
2222 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2223 ret_val = igb_validate_nvm_checksum_with_offset(hw,
2224 nvm_offset);
2225 if (ret_val != 0)
2226 goto out;
2229 out:
2230 return ret_val;
2234 * igb_update_nvm_checksum_i350 - Update EEPROM checksum
2235 * @hw: pointer to the HW structure
2237 * Updates the EEPROM section checksums for all 4 ports by reading/adding
2238 * each word of the EEPROM up to the checksum. Then calculates the EEPROM
2239 * checksum and writes the value to the EEPROM.
2241 static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw)
2243 s32 ret_val = 0;
2244 u16 j;
2245 u16 nvm_offset;
2247 for (j = 0; j < 4; j++) {
2248 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2249 ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
2250 if (ret_val != 0)
2251 goto out;
2254 out:
2255 return ret_val;
2259 * igb_set_eee_i350 - Enable/disable EEE support
2260 * @hw: pointer to the HW structure
2262 * Enable/disable EEE based on setting in dev_spec structure.
2265 s32 igb_set_eee_i350(struct e1000_hw *hw)
2267 s32 ret_val = 0;
2268 u32 ipcnfg, eeer;
2270 if ((hw->mac.type < e1000_i350) ||
2271 (hw->phy.media_type != e1000_media_type_copper))
2272 goto out;
2273 ipcnfg = rd32(E1000_IPCNFG);
2274 eeer = rd32(E1000_EEER);
2276 /* enable or disable per user setting */
2277 if (!(hw->dev_spec._82575.eee_disable)) {
2278 u32 eee_su = rd32(E1000_EEE_SU);
2280 ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
2281 eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
2282 E1000_EEER_LPI_FC);
2284 /* This bit should not be set in normal operation. */
2285 if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
2286 hw_dbg("LPI Clock Stop Bit should not be set!\n");
2289 } else {
2290 ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
2291 E1000_IPCNFG_EEE_100M_AN);
2292 eeer &= ~(E1000_EEER_TX_LPI_EN |
2293 E1000_EEER_RX_LPI_EN |
2294 E1000_EEER_LPI_FC);
2296 wr32(E1000_IPCNFG, ipcnfg);
2297 wr32(E1000_EEER, eeer);
2298 rd32(E1000_IPCNFG);
2299 rd32(E1000_EEER);
2300 out:
2302 return ret_val;
2305 static struct e1000_mac_operations e1000_mac_ops_82575 = {
2306 .init_hw = igb_init_hw_82575,
2307 .check_for_link = igb_check_for_link_82575,
2308 .rar_set = igb_rar_set,
2309 .read_mac_addr = igb_read_mac_addr_82575,
2310 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
2313 static struct e1000_phy_operations e1000_phy_ops_82575 = {
2314 .acquire = igb_acquire_phy_82575,
2315 .get_cfg_done = igb_get_cfg_done_82575,
2316 .release = igb_release_phy_82575,
2319 static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
2320 .acquire = igb_acquire_nvm_82575,
2321 .read = igb_read_nvm_eerd,
2322 .release = igb_release_nvm_82575,
2323 .write = igb_write_nvm_spi,
2326 const struct e1000_info e1000_82575_info = {
2327 .get_invariants = igb_get_invariants_82575,
2328 .mac_ops = &e1000_mac_ops_82575,
2329 .phy_ops = &e1000_phy_ops_82575,
2330 .nvm_ops = &e1000_nvm_ops_82575,