usr/src/uts/common/io/e1000g/e1000_ich8lan.c
1 /*
2 * This file is provided under a CDDLv1 license. When using or
3 * redistributing this file, you may do so under this license.
4 * In redistributing this file this license must be included
5 * and no other modification of this header file is permitted.
7 * CDDL LICENSE SUMMARY
9 * Copyright(c) 1999 - 2009 Intel Corporation. All rights reserved.
11 * The contents of this file are subject to the terms of Version
12 * 1.0 of the Common Development and Distribution License (the "License").
14 * You should have received a copy of the License with this software.
15 * You can obtain a copy of the License at
16 * http://www.opensolaris.org/os/licensing.
17 * See the License for the specific language governing permissions
18 * and limitations under the License.
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms of the CDDLv1.
27 * Copyright (c) 2001-2010, Intel Corporation
28 * All rights reserved.
30 * Redistribution and use in source and binary forms, with or without
31 * modification, are permitted provided that the following conditions are met:
33 * 1. Redistributions of source code must retain the above copyright notice,
34 * this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright
37 * notice, this list of conditions and the following disclaimer in the
38 * documentation and/or other materials provided with the distribution.
40 * 3. Neither the name of the Intel Corporation nor the names of its
41 * contributors may be used to endorse or promote products derived from
42 * this software without specific prior written permission.
44 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
45 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
46 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
47 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
48 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
54 * POSSIBILITY OF SUCH DAMAGE.
58 * 82562G 10/100 Network Connection
59 * 82562G-2 10/100 Network Connection
60 * 82562GT 10/100 Network Connection
61 * 82562GT-2 10/100 Network Connection
62 * 82562V 10/100 Network Connection
63 * 82562V-2 10/100 Network Connection
64 * 82566DC-2 Gigabit Network Connection
65 * 82566DC Gigabit Network Connection
66 * 82566DM-2 Gigabit Network Connection
67 * 82566DM Gigabit Network Connection
68 * 82566MC Gigabit Network Connection
69 * 82566MM Gigabit Network Connection
70 * 82567LM Gigabit Network Connection
71 * 82567LF Gigabit Network Connection
72 * 82567V Gigabit Network Connection
73 * 82567LM-2 Gigabit Network Connection
74 * 82567LF-2 Gigabit Network Connection
75 * 82567V-2 Gigabit Network Connection
76 * 82567LF-3 Gigabit Network Connection
77 * 82567LM-3 Gigabit Network Connection
78 * 82567LM-4 Gigabit Network Connection
79 * 82577LM Gigabit Network Connection
80 * 82577LC Gigabit Network Connection
81 * 82578DM Gigabit Network Connection
82 * 82578DC Gigabit Network Connection
83 * 82579LM Gigabit Network Connection
84 * 82579V Gigabit Network Connection
87 #include "e1000_api.h"
89 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw);
90 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw);
91 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw);
92 static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw);
93 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
94 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
95 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
96 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
97 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
98 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
99 static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
100 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
101 u8 *mc_addr_list,
102 u32 mc_addr_count);
103 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
104 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
105 static s32 e1000_get_phy_info_ich8lan(struct e1000_hw *hw);
106 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
107 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
108 bool active);
109 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
110 bool active);
111 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
112 u16 words, u16 *data);
113 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
114 u16 words, u16 *data);
115 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
116 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
117 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
118 u16 *data);
119 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
120 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
121 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw);
122 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw);
123 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
124 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
125 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
126 u16 *speed, u16 *duplex);
127 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
128 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
129 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
130 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
131 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
132 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
133 static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
134 static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
135 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
136 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
137 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout);
138 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw);
139 static s32 e1000_get_phy_info_ife_ich8lan(struct e1000_hw *hw);
140 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
141 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
142 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
143 u32 offset, u8 *data);
144 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
145 u8 size, u16 *data);
146 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
147 u32 offset, u16 *data);
148 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
149 u32 offset, u8 byte);
150 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw,
151 u32 offset, u8 data);
152 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
153 u8 size, u16 data);
154 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
155 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
156 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
157 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
158 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
159 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
160 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
161 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
163 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
164 /* Offset 04h HSFSTS */
165 union ich8_hws_flash_status {
166 struct ich8_hsfsts {
167 u16 flcdone:1; /* bit 0 Flash Cycle Done */
168 u16 flcerr:1; /* bit 1 Flash Cycle Error */
169 u16 dael:1; /* bit 2 Direct Access error Log */
170 u16 berasesz:2; /* bit 4:3 Sector Erase Size */
171 u16 flcinprog:1; /* bit 5 flash cycle in Progress */
172 u16 reserved1:2; /* bit 7:6 Reserved */
173 u16 reserved2:6; /* bit 13:8 Reserved */
174 u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
175 u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
176 } hsf_status;
177 u16 regval;
178 };
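/*
 * Typical use: read the 16-bit HSFSTS register into .regval, then test
 * the individual fields through .hsf_status (e.g. poll flcdone/flcerr
 * after starting a flash cycle).
 */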
180 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
181 /* Offset 06h FLCTL */
182 union ich8_hws_flash_ctrl {
183 struct ich8_hsflctl {
184 u16 flcgo:1; /* 0 Flash Cycle Go */
185 u16 flcycle:2; /* 2:1 Flash Cycle */
186 u16 reserved:5; /* 7:3 Reserved */
187 u16 fldbcount:2; /* 9:8 Flash Data Byte Count */
188 u16 flockdn:6; /* 15:10 Reserved */
189 } hsf_ctrl;
190 u16 regval;
191 };
193 /* ICH Flash Region Access Permissions */
194 union ich8_hws_flash_regacc {
195 struct ich8_flracc {
196 u32 grra:8; /* 0:7 GbE region Read Access */
197 u32 grwa:8; /* 8:15 GbE region Write Access */
198 u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
199 u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
200 } hsf_flregacc;
201 u16 regval;
202 };
205 * e1000_init_phy_params_pchlan - Initialize PHY function pointers
206 * @hw: pointer to the HW structure
208 * Initialize family-specific PHY parameters and function pointers.
210 static s32
211 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
213 struct e1000_phy_info *phy = &hw->phy;
214 u32 ctrl, fwsm;
215 s32 ret_val = E1000_SUCCESS;
217 DEBUGFUNC("e1000_init_phy_params_pchlan");
219 phy->addr = 1;
220 phy->reset_delay_us = 100;
222 phy->ops.acquire = e1000_acquire_swflag_ich8lan;
223 phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
224 phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
225 phy->ops.read_reg = e1000_read_phy_reg_hv;
226 phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
227 phy->ops.release = e1000_release_swflag_ich8lan;
228 phy->ops.reset = e1000_phy_hw_reset_ich8lan;
229 phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
230 phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
231 phy->ops.write_reg = e1000_write_phy_reg_hv;
232 phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
233 phy->ops.power_up = e1000_power_up_phy_copper;
234 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
235 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
238 * The MAC-PHY interconnect may still be in SMBus mode
239 * after Sx->S0. If the manageability engine (ME) is
240 * disabled, then toggle the LANPHYPC Value bit to force
241 * the interconnect to PCIe mode.
243 fwsm = E1000_READ_REG(hw, E1000_FWSM);
244 if (!(fwsm & E1000_ICH_FWSM_FW_VALID) &&
245 !(hw->phy.ops.check_reset_block(hw))) {
246 ctrl = E1000_READ_REG(hw, E1000_CTRL);
247 ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
248 ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
249 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
250 usec_delay(10);
251 ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
252 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
253 msec_delay(50);
256 * Gate automatic PHY configuration by hardware on
257 * non-managed 82579
259 if (hw->mac.type == e1000_pch2lan)
260 e1000_gate_hw_phy_config_ich8lan(hw, true);
264 * Reset the PHY before any access to it. Doing so ensures that
265 * the PHY is in a known good state before we read/write PHY registers.
266 * The generic reset is sufficient here, because we haven't determined
267 * the PHY type yet.
269 ret_val = e1000_phy_hw_reset_generic(hw);
270 if (ret_val)
271 goto out;
273 /* Ungate automatic PHY configuration on non-managed 82579 */
274 if ((hw->mac.type == e1000_pch2lan) &&
275 !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
276 msec_delay(10);
277 e1000_gate_hw_phy_config_ich8lan(hw, false);
280 phy->id = e1000_phy_unknown;
281 switch (hw->mac.type) {
282 default:
283 ret_val = e1000_get_phy_id(hw);
284 if (ret_val)
285 goto out;
286 if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
287 break;
288 /* FALLTHROUGH */
289 case e1000_pch2lan:
291 * In case the PHY needs to be in mdio slow mode,
292 * set slow mode and try to get the PHY id again.
294 ret_val = e1000_set_mdio_slow_mode_hv(hw);
295 if (ret_val)
296 goto out;
297 ret_val = e1000_get_phy_id(hw);
298 if (ret_val)
299 goto out;
300 break;
302 phy->type = e1000_get_phy_type_from_id(phy->id);
304 switch (phy->type) {
305 case e1000_phy_82577:
306 case e1000_phy_82579:
307 phy->ops.check_polarity = e1000_check_polarity_82577;
308 phy->ops.force_speed_duplex =
309 e1000_phy_force_speed_duplex_82577;
310 phy->ops.get_cable_length = e1000_get_cable_length_82577;
311 phy->ops.get_info = e1000_get_phy_info_82577;
312 phy->ops.commit = e1000_phy_sw_reset_generic;
313 break;
314 case e1000_phy_82578:
315 phy->ops.check_polarity = e1000_check_polarity_m88;
316 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
317 phy->ops.get_cable_length = e1000_get_cable_length_m88;
318 phy->ops.get_info = e1000_get_phy_info_m88;
319 break;
320 default:
321 ret_val = -E1000_ERR_PHY;
322 break;
325 out:
326 return (ret_val);
330 * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
331 * @hw: pointer to the HW structure
333 * Initialize family-specific PHY parameters and function pointers.
335 static s32
336 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
338 struct e1000_phy_info *phy = &hw->phy;
339 s32 ret_val = E1000_SUCCESS;
340 u16 i = 0;
342 DEBUGFUNC("e1000_init_phy_params_ich8lan");
344 phy->addr = 1;
345 phy->reset_delay_us = 100;
347 phy->ops.acquire = e1000_acquire_swflag_ich8lan;
348 phy->ops.check_polarity = e1000_check_polarity_ife;
349 phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
350 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
351 phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
352 phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
353 phy->ops.get_info = e1000_get_phy_info_ich8lan;
354 phy->ops.read_reg = e1000_read_phy_reg_igp;
355 phy->ops.release = e1000_release_swflag_ich8lan;
356 phy->ops.reset = e1000_phy_hw_reset_ich8lan;
357 phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
358 phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
359 phy->ops.write_reg = e1000_write_phy_reg_igp;
360 phy->ops.power_up = e1000_power_up_phy_copper;
361 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
364 * We may need to do this twice - once for IGP and if that fails,
365 * we'll set BM func pointers and try again
367 ret_val = e1000_determine_phy_address(hw);
368 if (ret_val) {
369 phy->ops.write_reg = e1000_write_phy_reg_bm;
370 phy->ops.read_reg = e1000_read_phy_reg_bm;
371 ret_val = e1000_determine_phy_address(hw);
372 if (ret_val) {
373 DEBUGOUT("Can't determine PHY address. Erroring out\n");
374 goto out;
378 phy->id = 0;
379 while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
380 (i++ < 100)) {
381 msec_delay(1);
382 ret_val = e1000_get_phy_id(hw);
383 if (ret_val)
384 goto out;
387 /* Verify phy id */
388 switch (phy->id) {
389 case IGP03E1000_E_PHY_ID:
390 phy->type = e1000_phy_igp_3;
391 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
392 phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
393 phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
394 break;
395 case IFE_E_PHY_ID:
396 case IFE_PLUS_E_PHY_ID:
397 case IFE_C_E_PHY_ID:
398 phy->type = e1000_phy_ife;
399 phy->autoneg_mask = E1000_ALL_NOT_GIG;
400 break;
401 case BME1000_E_PHY_ID:
402 phy->type = e1000_phy_bm;
403 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
404 phy->ops.read_reg = e1000_read_phy_reg_bm;
405 phy->ops.write_reg = e1000_write_phy_reg_bm;
406 phy->ops.commit = e1000_phy_sw_reset_generic;
407 break;
408 default:
409 ret_val = -E1000_ERR_PHY;
410 goto out;
413 out:
414 return (ret_val);
418 * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
419 * @hw: pointer to the HW structure
421 * Initialize family-specific NVM parameters and function
422 * pointers.
424 static s32
425 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
427 struct e1000_nvm_info *nvm = &hw->nvm;
428 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
429 u32 gfpreg, sector_base_addr, sector_end_addr;
430 s32 ret_val = E1000_SUCCESS;
431 u16 i;
433 DEBUGFUNC("e1000_init_nvm_params_ich8lan");
435 /* Can't read flash registers if the register set isn't mapped. */
436 if (!hw->flash_address) {
437 DEBUGOUT("ERROR: Flash registers not mapped\n");
438 ret_val = -E1000_ERR_CONFIG;
439 goto out;
442 nvm->type = e1000_nvm_flash_sw;
444 gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
447 * sector_X_addr is a "sector"-aligned address (4096 bytes). Add 1 to
448 * sector_end_addr since this sector is included in the overall size.
450 sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
451 sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
453 /* flash_base_addr is byte-aligned */
454 nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
457 * find total size of the NVM, then cut in half since the total size
458 * represents two separate NVM banks.
460 nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
461 << FLASH_SECTOR_ADDR_SHIFT;
462 nvm->flash_bank_size /= 2;
463 /* Adjust to word count */
464 nvm->flash_bank_size /= sizeof (u16);
466 nvm->word_size = E1000_SHADOW_RAM_WORDS;
468 /* Clear shadow ram */
469 for (i = 0; i < nvm->word_size; i++) {
470 dev_spec->shadow_ram[i].modified = false;
471 dev_spec->shadow_ram[i].value = 0xFFFF;
474 E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
475 E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
477 /* Function Pointers */
478 nvm->ops.acquire = e1000_acquire_nvm_ich8lan;
479 nvm->ops.release = e1000_release_nvm_ich8lan;
480 nvm->ops.read = e1000_read_nvm_ich8lan;
481 nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
482 nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
483 nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan;
484 nvm->ops.write = e1000_write_nvm_ich8lan;
486 out:
487 return (ret_val);
491 * e1000_init_mac_params_ich8lan - Initialize MAC function pointers
492 * @hw: pointer to the HW structure
494 * Initialize family-specific MAC parameters and function
495 * pointers.
497 static s32
498 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
500 struct e1000_mac_info *mac = &hw->mac;
501 u16 pci_cfg;
503 DEBUGFUNC("e1000_init_mac_params_ich8lan");
505 /* Set media type function pointer */
506 hw->phy.media_type = e1000_media_type_copper;
508 /* Set mta register count */
509 mac->mta_reg_count = 32;
510 /* Set rar entry count */
511 mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
512 if (mac->type == e1000_ich8lan)
513 mac->rar_entry_count--;
514 /* Set if part includes ASF firmware */
515 mac->asf_firmware_present = true;
516 /* FWSM register */
517 mac->has_fwsm = true;
518 /* ARC subsystem not supported */
519 mac->arc_subsystem_valid = false;
520 /* Adaptive IFS supported */
521 mac->adaptive_ifs = true;
523 /* Function pointers */
525 /* bus type/speed/width */
526 mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
527 /* function id */
528 mac->ops.set_lan_id = e1000_set_lan_id_single_port;
529 /* reset */
530 mac->ops.reset_hw = e1000_reset_hw_ich8lan;
531 /* hw initialization */
532 mac->ops.init_hw = e1000_init_hw_ich8lan;
533 /* link setup */
534 mac->ops.setup_link = e1000_setup_link_ich8lan;
535 /* physical interface setup */
536 mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
537 /* check for link */
538 mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
539 /* link info */
540 mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
541 /* multicast address update */
542 mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
543 /* clear hardware counters */
544 mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
546 /* LED operations */
547 switch (mac->type) {
548 case e1000_ich8lan:
549 case e1000_ich9lan:
550 case e1000_ich10lan:
551 /* check management mode */
552 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
553 /* ID LED init */
554 mac->ops.id_led_init = e1000_id_led_init_generic;
555 /* blink LED */
556 mac->ops.blink_led = e1000_blink_led_generic;
557 /* setup LED */
558 mac->ops.setup_led = e1000_setup_led_generic;
559 /* cleanup LED */
560 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
561 /* turn on/off LED */
562 mac->ops.led_on = e1000_led_on_ich8lan;
563 mac->ops.led_off = e1000_led_off_ich8lan;
564 break;
565 case e1000_pch2lan:
566 mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
567 mac->ops.rar_set = e1000_rar_set_pch2lan;
568 /* multicast address update for pch2 */
569 mac->ops.update_mc_addr_list =
570 e1000_update_mc_addr_list_pch2lan;
571 /* FALLTHROUGH */
572 case e1000_pchlan:
573 /* save PCH revision_id */
574 e1000_read_pci_cfg(hw, 0x2, &pci_cfg);
575 hw->revision_id = (u8)(pci_cfg & 0x000F);
576 /* check management mode */
577 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
578 /* ID LED init */
579 mac->ops.id_led_init = e1000_id_led_init_pchlan;
580 /* setup LED */
581 mac->ops.setup_led = e1000_setup_led_pchlan;
582 /* cleanup LED */
583 mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
584 /* turn on/off LED */
585 mac->ops.led_on = e1000_led_on_pchlan;
586 mac->ops.led_off = e1000_led_off_pchlan;
587 break;
588 default:
589 break;
592 /* Enable PCS Lock-loss workaround for ICH8 */
593 if (mac->type == e1000_ich8lan)
594 e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
596 /* Gate automatic PHY configuration by hardware on managed 82579 */
597 if ((mac->type == e1000_pch2lan) &&
598 (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
599 e1000_gate_hw_phy_config_ich8lan(hw, true);
601 return (E1000_SUCCESS);
605 * e1000_set_eee_pchlan - Enable/disable EEE support
606 * @hw: pointer to the HW structure
608 * Enable/disable EEE based on setting in dev_spec structure. The bits in
609 * the LPI Control register will remain set only if/when link is up.
611 static s32
612 e1000_set_eee_pchlan(struct e1000_hw *hw)
614 s32 ret_val = E1000_SUCCESS;
615 u16 phy_reg;
617 DEBUGFUNC("e1000_set_eee_pchlan");
619 if (hw->phy.type != e1000_phy_82579)
620 goto out;
622 ret_val = hw->phy.ops.read_reg(hw, I82579_LPI_CTRL, &phy_reg);
623 if (ret_val)
624 goto out;
626 if (hw->dev_spec.ich8lan.eee_disable)
627 phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
628 else
629 phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;
631 ret_val = hw->phy.ops.write_reg(hw, I82579_LPI_CTRL, phy_reg);
632 out:
633 return (ret_val);
637 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
638 * @hw: pointer to the HW structure
640 * Checks to see if the link status of the hardware has changed. If a
641 * change in link status has been detected, then we read the PHY registers
642 * to get the current speed/duplex if link exists.
644 static s32
645 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
647 struct e1000_mac_info *mac = &hw->mac;
648 s32 ret_val;
649 bool link;
651 DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
654 * We only want to go out to the PHY registers to see if Auto-Neg
655 * has completed and/or if our link status has changed. The
656 * get_link_status flag is set upon receiving a Link Status
657 * Change or Rx Sequence Error interrupt.
659 if (!mac->get_link_status) {
660 ret_val = E1000_SUCCESS;
661 goto out;
665 * First we want to see if the MII Status Register reports
666 * link. If so, then we want to get the current speed/duplex
667 * of the PHY.
669 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
670 if (ret_val)
671 goto out;
673 if (hw->mac.type == e1000_pchlan) {
674 ret_val = e1000_k1_gig_workaround_hv(hw, link);
675 if (ret_val)
676 goto out;
679 if (!link)
680 goto out; /* No link detected */
682 mac->get_link_status = false;
684 if (hw->phy.type == e1000_phy_82578) {
685 ret_val = e1000_link_stall_workaround_hv(hw);
686 if (ret_val)
687 goto out;
690 if (hw->mac.type == e1000_pch2lan) {
691 ret_val = e1000_k1_workaround_lv(hw);
692 if (ret_val)
693 goto out;
697 * Check if there was DownShift, must be checked
698 * immediately after link-up
700 (void) e1000_check_downshift_generic(hw);
702 /* Enable/Disable EEE after link up */
703 ret_val = e1000_set_eee_pchlan(hw);
704 if (ret_val)
705 goto out;
708 * If we are forcing speed/duplex, then we simply return since
709 * we have already determined whether we have link or not.
711 if (!mac->autoneg) {
712 ret_val = -E1000_ERR_CONFIG;
713 goto out;
717 * Auto-Neg is enabled. Auto Speed Detection takes care
718 * of MAC speed/duplex configuration. So we only need to
719 * configure Collision Distance in the MAC.
721 e1000_config_collision_dist_generic(hw);
724 * Configure Flow Control now that Auto-Neg has completed.
725 * First, we need to restore the desired flow control
726 * settings because we may have had to re-autoneg with a
727 * different link partner.
729 ret_val = e1000_config_fc_after_link_up_generic(hw);
730 if (ret_val)
731 DEBUGOUT("Error configuring flow control\n");
733 out:
734 return (ret_val);
738 * e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
739 * @hw: pointer to the HW structure
741 * Initialize family-specific function pointers for PHY, MAC, and NVM.
743 void
744 e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
746 DEBUGFUNC("e1000_init_function_pointers_ich8lan");
748 hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
749 hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
750 switch (hw->mac.type) {
751 case e1000_ich8lan:
752 case e1000_ich9lan:
753 case e1000_ich10lan:
754 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
755 break;
756 case e1000_pchlan:
757 case e1000_pch2lan:
758 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
759 break;
760 default:
761 break;
766 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
767 * @hw: pointer to the HW structure
769 * Acquires the mutex for performing NVM operations.
771 static s32
772 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
774 DEBUGFUNC("e1000_acquire_nvm_ich8lan");
776 E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
778 return (E1000_SUCCESS);
782 * e1000_release_nvm_ich8lan - Release NVM mutex
783 * @hw: pointer to the HW structure
785 * Releases the mutex used while performing NVM operations.
787 static void
788 e1000_release_nvm_ich8lan(struct e1000_hw *hw)
790 DEBUGFUNC("e1000_release_nvm_ich8lan");
792 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
796 * e1000_acquire_swflag_ich8lan - Acquire software control flag
797 * @hw: pointer to the HW structure
799 * Acquires the software control flag for performing PHY and select
800 * MAC CSR accesses.
802 static s32
803 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
805 u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
806 s32 ret_val = E1000_SUCCESS;
808 DEBUGFUNC("e1000_acquire_swflag_ich8lan");
810 E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
812 while (timeout) {
813 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
814 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
815 break;
817 msec_delay_irq(1);
818 timeout--;
821 if (!timeout) {
822 DEBUGOUT("SW/FW/HW has locked the resource for too long.\n");
823 ret_val = -E1000_ERR_CONFIG;
824 goto out;
827 /* In some cases, hardware will take up to 400ms to set the SW flag. */
828 timeout = SW_FLAG_TIMEOUT;
830 extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
831 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
833 while (timeout) {
834 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
835 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
836 break;
838 msec_delay_irq(1);
839 timeout--;
842 if (!timeout) {
843 DEBUGOUT("Failed to acquire the semaphore.\n");
844 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
845 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
846 ret_val = -E1000_ERR_CONFIG;
847 goto out;
850 out:
851 if (ret_val)
852 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
854 return (ret_val);
858 * e1000_release_swflag_ich8lan - Release software control flag
859 * @hw: pointer to the HW structure
861 * Releases the software control flag for performing PHY and select
862 * MAC CSR accesses.
864 static void
865 e1000_release_swflag_ich8lan(struct e1000_hw *hw)
867 u32 extcnf_ctrl;
869 DEBUGFUNC("e1000_release_swflag_ich8lan");
871 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
872 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
873 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
875 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
879 * e1000_check_mng_mode_ich8lan - Checks management mode
880 * @hw: pointer to the HW structure
882 * This checks if the adapter has manageability enabled.
883 * This is a function pointer entry point only called by read/write
884 * routines for the PHY and NVM parts.
886 static bool
887 e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
889 u32 fwsm;
891 DEBUGFUNC("e1000_check_mng_mode_ich8lan");
893 fwsm = E1000_READ_REG(hw, E1000_FWSM);
895 return ((fwsm & E1000_FWSM_MODE_MASK) ==
896 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
900 * e1000_check_mng_mode_pchlan - Checks management mode
901 * @hw: pointer to the HW structure
903 * This checks if the adapter has iAMT enabled.
904 * This is a function pointer entry point only called by read/write
905 * routines for the PHY and NVM parts.
907 static bool
908 e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
910 u32 fwsm;
912 DEBUGFUNC("e1000_check_mng_mode_pchlan");
914 fwsm = E1000_READ_REG(hw, E1000_FWSM);
916 return ((fwsm & E1000_ICH_FWSM_FW_VALID) &&
917 (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)));
921 * e1000_rar_set_pch2lan - Set receive address register
922 * @hw: pointer to the HW structure
923 * @addr: pointer to the receive address
924 * @index: receive address array register
926 * Sets the receive address array register at index to the address passed
927 * in by addr. For 82579, RAR[0] is the base address register that is to
928 * contain the MAC address but RAR[1-6] are reserved for manageability (ME).
929 * Use SHRA[0-3] in place of those reserved for ME.
931 static void
932 e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
934 u32 rar_low, rar_high;
936 DEBUGFUNC("e1000_rar_set_pch2lan");
939 * HW expects these in little endian so we reverse the byte order
940 * from network order (big endian) to little endian
942 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
943 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
945 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
947 /* If MAC address zero, no need to set the AV bit */
948 if (rar_low || rar_high)
949 rar_high |= E1000_RAH_AV;
951 if (index == 0) {
952 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
953 E1000_WRITE_FLUSH(hw);
954 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
955 E1000_WRITE_FLUSH(hw);
956 return;
959 if (index < hw->mac.rar_entry_count) {
960 E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
961 E1000_WRITE_FLUSH(hw);
962 E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
963 E1000_WRITE_FLUSH(hw);
965 /* verify the register updates */
966 if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
967 (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
968 return;
970 DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
971 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
974 DEBUGOUT1("Failed to write receive address at index %d\n", index);
978 * e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
979 * @hw: pointer to the HW structure
980 * @mc_addr_list: array of multicast addresses to program
981 * @mc_addr_count: number of multicast addresses to program
983 * Updates entire Multicast Table Array of the PCH2 MAC and PHY.
984 * The caller must have a packed mc_addr_list of multicast addresses.
986 static void
987 e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw, u8 *mc_addr_list,
988 u32 mc_addr_count)
990 int i;
992 DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
994 e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
996 for (i = 0; i < hw->mac.mta_reg_count; i++) {
997 hw->phy.ops.write_reg(hw, BM_MTA(i),
998 (u16)(hw->mac.mta_shadow[i] & 0xFFFF));
999 hw->phy.ops.write_reg(hw, (BM_MTA(i) + 1),
1000 (u16)((hw->mac.mta_shadow[i] >> 16) &
1001 0xFFFF));
1006 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
1007 * @hw: pointer to the HW structure
1009 * Checks if firmware is blocking the reset of the PHY.
1010 * This is a function pointer entry point only called by
1011 * reset routines.
1013 static s32
1014 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
1016 u32 fwsm;
1018 DEBUGFUNC("e1000_check_reset_block_ich8lan");
1020 fwsm = E1000_READ_REG(hw, E1000_FWSM);
1022 return ((fwsm & E1000_ICH_FWSM_RSPCIPHY) ? E1000_SUCCESS
1023 : E1000_BLK_PHY_RESET);
1027 * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
1028 * @hw: pointer to the HW structure
1030 * Assumes semaphore already acquired.
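 * Copies the SMBus address strap value from the E1000_STRAP register
 * into the HV_SMB_ADDR PHY register (with PEC enabled and the valid
 * bit set) so the PHY can still be reached over SMBus in Sx states.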
1033 static s32
1034 e1000_write_smbus_addr(struct e1000_hw *hw)
1036 u16 phy_data;
1037 u32 strap = E1000_READ_REG(hw, E1000_STRAP);
1038 s32 ret_val = E1000_SUCCESS;
1040 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
1042 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
1043 if (ret_val)
1044 goto out;
1046 phy_data &= ~HV_SMB_ADDR_MASK;
1047 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
1048 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
1049 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
1051 out:
1052 return (ret_val);
1056 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
1057 * @hw: pointer to the HW structure
1059 * SW should configure the LCD from the NVM extended configuration region
1060 * as a workaround for certain parts.
1062 static s32
1063 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1065 struct e1000_phy_info *phy = &hw->phy;
1066 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
1067 s32 ret_val = E1000_SUCCESS;
1068 u16 word_addr, reg_data, reg_addr, phy_page = 0;
1070 DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
1073 * Initialize the PHY from the NVM on ICH platforms. This
1074 * is needed due to an issue where the NVM configuration is
1075 * not properly autoloaded after power transitions.
1076 * Therefore, after each PHY reset, we will load the
1077 * configuration data out of the NVM manually.
1079 switch (hw->mac.type) {
1080 case e1000_ich8lan:
1081 if (phy->type != e1000_phy_igp_3)
1082 return (ret_val);
1084 if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
1085 (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
1086 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
1087 break;
1089 /* FALLTHROUGH */
1090 case e1000_pchlan:
1091 case e1000_pch2lan:
1092 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
1093 break;
1094 default:
1095 return (ret_val);
1098 ret_val = hw->phy.ops.acquire(hw);
1099 if (ret_val)
1100 return (ret_val);
1102 data = E1000_READ_REG(hw, E1000_FEXTNVM);
1103 if (!(data & sw_cfg_mask))
1104 goto out;
1107 * Make sure HW does not configure LCD from PHY
1108 * extended configuration before SW configuration
1110 data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1111 if (!(hw->mac.type == e1000_pch2lan)) {
1112 if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
1113 goto out;
1116 cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
1117 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
1118 cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
1119 if (!cnf_size)
1120 goto out;
1122 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
1123 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
1125 if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
1126 (hw->mac.type == e1000_pchlan)) ||
1127 (hw->mac.type == e1000_pch2lan)) {
1129 * HW configures the SMBus address and LEDs when the
1130 * OEM and LCD Write Enable bits are set in the NVM.
1131 * When both NVM bits are cleared, SW will configure
1132 * them instead.
1134 ret_val = e1000_write_smbus_addr(hw);
1135 if (ret_val)
1136 goto out;
1138 data = E1000_READ_REG(hw, E1000_LEDCTL);
1139 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
1140 (u16)data);
1141 if (ret_val)
1142 goto out;
1145 /* Configure LCD from extended configuration region. */
1147 /* cnf_base_addr is in DWORD */
1148 word_addr = (u16)(cnf_base_addr << 1);
1150 for (i = 0; i < cnf_size; i++) {
1151 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
1152 &reg_data);
1153 if (ret_val)
1154 goto out;
1156 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
1157 1, &reg_addr);
1158 if (ret_val)
1159 goto out;
1161 /* Save off the PHY page for future writes. */
1162 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
1163 phy_page = reg_data;
1164 continue;
1167 reg_addr &= PHY_REG_MASK;
1168 reg_addr |= phy_page;
1170 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
1171 reg_data);
1172 if (ret_val)
1173 goto out;
1176 out:
1177 hw->phy.ops.release(hw);
1178 return (ret_val);
1183 * e1000_k1_gig_workaround_hv - K1 Si workaround
1184 * @hw: pointer to the HW structure
1185 * @link: link up bool flag
1187 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
1188 * from a lower speed. This workaround disables K1 whenever link is at 1Gig.
1189 * If link is down, the function will restore the default K1 setting located
1190 * in the NVM.
1192 static s32
1193 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1195 s32 ret_val = E1000_SUCCESS;
1196 u16 status_reg = 0;
1197 bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
1199 DEBUGFUNC("e1000_k1_gig_workaround_hv");
1201 if (hw->mac.type != e1000_pchlan)
1202 goto out;
1204 /* Wrap the whole flow with the sw flag */
1205 ret_val = hw->phy.ops.acquire(hw);
1206 if (ret_val)
1207 goto out;
1209 /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
1210 if (link) {
1211 if (hw->phy.type == e1000_phy_82578) {
1212 ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
1213 &status_reg);
1214 if (ret_val)
1215 goto release;
1217 status_reg &= BM_CS_STATUS_LINK_UP |
1218 BM_CS_STATUS_RESOLVED |
1219 BM_CS_STATUS_SPEED_MASK;
1221 if (status_reg == (BM_CS_STATUS_LINK_UP |
1222 BM_CS_STATUS_RESOLVED |
1223 BM_CS_STATUS_SPEED_1000))
1224 k1_enable = false;
1227 if (hw->phy.type == e1000_phy_82577) {
1228 ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
1229 &status_reg);
1230 if (ret_val)
1231 goto release;
1233 status_reg &= HV_M_STATUS_LINK_UP |
1234 HV_M_STATUS_AUTONEG_COMPLETE |
1235 HV_M_STATUS_SPEED_MASK;
1237 if (status_reg == (HV_M_STATUS_LINK_UP |
1238 HV_M_STATUS_AUTONEG_COMPLETE |
1239 HV_M_STATUS_SPEED_1000))
1240 k1_enable = false;
1243 /* Link stall fix for link up */
1244 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1245 0x0100);
1246 if (ret_val)
1247 goto release;
1249 } else {
1250 /* Link stall fix for link down */
1251 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1252 0x4100);
1253 if (ret_val)
1254 goto release;
1257 ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
1259 release:
1260 hw->phy.ops.release(hw);
1261 out:
1262 return (ret_val);
1266 * e1000_configure_k1_ich8lan - Configure K1 power state
1267 * @hw: pointer to the HW structure
1268 * @k1_enable: K1 state to configure
1270 * Configure the K1 power state based on the provided parameter.
1271 * Assumes semaphore already acquired.
1273 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
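 * After updating the K1 configuration in the KMRN register, the MAC
 * speed is briefly forced (FRCSPD with the speed-bypass bit set) and
 * the original CTRL and CTRL_EXT values are then restored.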
1275 s32
1276 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
1278 s32 ret_val = E1000_SUCCESS;
1279 u32 ctrl_reg = 0;
1280 u32 ctrl_ext = 0;
1281 u32 reg = 0;
1282 u16 kmrn_reg = 0;
1284 ret_val = e1000_read_kmrn_reg_locked(hw,
1285 E1000_KMRNCTRLSTA_K1_CONFIG,
1286 &kmrn_reg);
1287 if (ret_val)
1288 goto out;
1290 if (k1_enable)
1291 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
1292 else
1293 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
1295 ret_val = e1000_write_kmrn_reg_locked(hw,
1296 E1000_KMRNCTRLSTA_K1_CONFIG,
1297 kmrn_reg);
1298 if (ret_val)
1299 goto out;
1301 usec_delay(20);
1302 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1303 ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
1305 reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
1306 reg |= E1000_CTRL_FRCSPD;
1307 E1000_WRITE_REG(hw, E1000_CTRL, reg);
1309 E1000_WRITE_REG(hw,
1310 E1000_CTRL_EXT,
1311 ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
1312 usec_delay(20);
1313 E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
1314 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
1315 usec_delay(20);
1317 out:
1318 return (ret_val);
1322 * e1000_oem_bits_config_ich8lan - SW-based OEM bits configuration
1323 * @hw: pointer to the HW structure
1324 * @d0_state: boolean set to true when entering D0, false when entering D3
1326 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
1327 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
1328 * in NVM determines whether HW should configure LPLU and Gbe Disable.
1330 s32
1331 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1333 s32 ret_val = 0;
1334 u32 mac_reg;
1335 u16 oem_reg;
1337 DEBUGFUNC("e1000_oem_bits_config_ich8lan");
1339 if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan))
1340 return (ret_val);
1342 ret_val = hw->phy.ops.acquire(hw);
1343 if (ret_val)
1344 return (ret_val);
1346 if (!(hw->mac.type == e1000_pch2lan)) {
1347 mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1348 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
1349 goto out;
1352 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
1353 if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
1354 goto out;
1356 mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
1358 ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
1359 if (ret_val)
1360 goto out;
1362 oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
1364 if (d0_state) {
1365 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
1366 oem_reg |= HV_OEM_BITS_GBE_DIS;
1368 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
1369 oem_reg |= HV_OEM_BITS_LPLU;
1370 } else {
1371 if (mac_reg & E1000_PHY_CTRL_NOND0A_GBE_DISABLE)
1372 oem_reg |= HV_OEM_BITS_GBE_DIS;
1374 if (mac_reg & E1000_PHY_CTRL_NOND0A_LPLU)
1375 oem_reg |= HV_OEM_BITS_LPLU;
1377 /* Restart auto-neg to activate the bits */
1378 if (!hw->phy.ops.check_reset_block(hw))
1379 oem_reg |= HV_OEM_BITS_RESTART_AN;
1380 ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
1382 out:
1383 hw->phy.ops.release(hw);
1385 return (ret_val);
1389 * e1000_hv_phy_powerdown_workaround_ich8lan - Power down workaround on Sx
1390 * @hw: pointer to the HW structure
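 * Applied only to 82577 PHYs when hw->revision_id is 2 or lower; a
 * single PHY register write is performed before the PHY is powered
 * down on entry to Sx.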
1392 s32
1393 e1000_hv_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
1395 DEBUGFUNC("e1000_hv_phy_powerdown_workaround_ich8lan");
1397 if ((hw->phy.type != e1000_phy_82577) || (hw->revision_id > 2))
1398 return (E1000_SUCCESS);
1400 return (hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0444));
1404 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
1405 * @hw: pointer to the HW structure
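 * Sets the HV_KMRN_MDIO_SLOW bit in HV_KMRN_MODE_CTRL so that MDIO
 * accesses to the PHY run at the slower rate required, for example,
 * while the PHY ID is being determined on 82579 (pch2lan) parts.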
1407 static s32
1408 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
1410 s32 ret_val;
1411 u16 data;
1413 DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
1415 ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
1416 if (ret_val)
1417 return (ret_val);
1419 data |= HV_KMRN_MDIO_SLOW;
1421 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
1423 return (ret_val);
1427 * e1000_hv_phy_workarounds_ich8lan - A series of PHY workarounds to be
1428 * done after every PHY reset.
1430 static s32
1431 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1433 s32 ret_val = E1000_SUCCESS;
1435 if (hw->mac.type != e1000_pchlan)
1436 goto out;
1438 if (((hw->phy.type == e1000_phy_82577) &&
1439 ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
1440 ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
1441 /* Disable generation of early preamble */
1442 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
1443 if (ret_val)
1444 goto out;
1446 /* Preamble tuning for SSC */
1447 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(770, 16), 0xA204);
1448 if (ret_val)
1449 goto out;
1452 if (hw->phy.type == e1000_phy_82578) {
1454 * Return registers to default by doing a soft reset then
1455 * writing 0x3140 to the control register.
1457 if (hw->phy.revision < 2) {
1458 (void) e1000_phy_sw_reset_generic(hw);
1459 ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
1460 0x3140);
1464 /* Select page 0 */
1465 ret_val = hw->phy.ops.acquire(hw);
1466 if (ret_val)
1467 goto out;
1469 hw->phy.addr = 1;
1470 ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
1471 if (ret_val)
1472 goto out;
1473 hw->phy.ops.release(hw);
1476 * Configure the K1 Si workaround during phy reset assuming there is
1477 * link so that it disables K1 if link is in 1Gbps.
1479 ret_val = e1000_k1_gig_workaround_hv(hw, true);
1481 out:
1482 return (ret_val);
1486 * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
1487 * @hw: pointer to the HW structure
1489 void
1490 e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
1492 u32 mac_reg;
1493 u16 i;
1495 DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
1497 /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
1498 for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1499 mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
1500 hw->phy.ops.write_reg(hw, BM_RAR_L(i),
1501 (u16)(mac_reg & 0xFFFF));
1502 hw->phy.ops.write_reg(hw, BM_RAR_M(i),
1503 (u16)((mac_reg >> 16) & 0xFFFF));
1504 mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
1505 hw->phy.ops.write_reg(hw, BM_RAR_H(i),
1506 (u16)(mac_reg & 0xFFFF));
1507 hw->phy.ops.write_reg(hw, BM_RAR_CTRL(i),
1508 (u16)((mac_reg >> 16) & 0x8000));
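/*
 * e1000_calc_rx_da_crc - Calculate CRC-32 of a receive (destination) address
 * @mac: six-byte MAC address
 *
 * Bit-reflected CRC-32 (polynomial 0xEDB88320) over the six address bytes.
 * Used to seed the E1000_PCH_RAICC registers in the 82579 jumbo frame
 * workaround below.
 */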
1512 static u32
1513 e1000_calc_rx_da_crc(u8 mac[])
1515 u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */
1516 u32 i, j, mask, crc;
1518 DEBUGFUNC("e1000_calc_rx_da_crc");
1520 crc = 0xffffffff;
1521 for (i = 0; i < 6; i++) {
1522 crc = crc ^ mac[i];
1523 for (j = 8; j > 0; j--) {
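/* mask is all ones when the low bit of crc is set, zero otherwise */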
1524 mask = (crc & 1) * (-1);
1525 crc = (crc >> 1) ^ (poly & mask);
1528 return (~crc);
1532 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
1533 * with 82579 PHY
1534 * @hw: pointer to the HW structure
1535 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
1537 s32
1538 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1540 s32 ret_val = E1000_SUCCESS;
1541 u16 phy_reg, data;
1542 u32 mac_reg;
1543 u16 i;
1545 DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
1547 if (hw->mac.type != e1000_pch2lan)
1548 goto out;
1550 /* disable Rx path while enabling/disabling workaround */
1551 hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
1552 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
1553 phy_reg | (1 << 14));
1554 if (ret_val)
1555 goto out;
1557 if (enable) {
1559 * Write Rx addresses (rar_entry_count for RAL/H, +4 for
1560 * SHRAL/H) and initial CRC values to the MAC
1562 for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1563 u8 mac_addr[ETH_ADDR_LEN] = {0};
1564 u32 addr_high, addr_low;
1566 addr_high = E1000_READ_REG(hw, E1000_RAH(i));
1567 if (!(addr_high & E1000_RAH_AV))
1568 continue;
1569 addr_low = E1000_READ_REG(hw, E1000_RAL(i));
1570 mac_addr[0] = (addr_low & 0xFF);
1571 mac_addr[1] = ((addr_low >> 8) & 0xFF);
1572 mac_addr[2] = ((addr_low >> 16) & 0xFF);
1573 mac_addr[3] = ((addr_low >> 24) & 0xFF);
1574 mac_addr[4] = (addr_high & 0xFF);
1575 mac_addr[5] = ((addr_high >> 8) & 0xFF);
1577 E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
1578 e1000_calc_rx_da_crc(mac_addr));
1581 /* Write Rx addresses to the PHY */
1582 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
1584 /* Enable jumbo frame workaround in the MAC */
1585 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
1586 mac_reg &= ~(1 << 14);
1587 mac_reg |= (7 << 15);
1588 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
1590 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
1591 mac_reg |= E1000_RCTL_SECRC;
1592 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
1594 ret_val = e1000_read_kmrn_reg_generic(hw,
1595 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1596 &data);
1597 if (ret_val)
1598 goto out;
1599 ret_val = e1000_write_kmrn_reg_generic(hw,
1600 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1601 data | (1 << 0));
1602 if (ret_val)
1603 goto out;
1604 ret_val = e1000_read_kmrn_reg_generic(hw,
1605 E1000_KMRNCTRLSTA_HD_CTRL,
1606 &data);
1607 if (ret_val)
1608 goto out;
1609 data &= ~(0xF << 8);
1610 data |= (0xB << 8);
1611 ret_val = e1000_write_kmrn_reg_generic(hw,
1612 E1000_KMRNCTRLSTA_HD_CTRL,
1613 data);
1614 if (ret_val)
1615 goto out;
1617 /* Enable jumbo frame workaround in the PHY */
1618 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
1619 data &= ~(0x7F << 5);
1620 data |= (0x37 << 5);
1621 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
1622 if (ret_val)
1623 goto out;
1624 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
1625 data &= ~(1 << 13);
1626 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
1627 if (ret_val)
1628 goto out;
1629 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
1630 data &= ~(0x3FF << 2);
1631 data |= (0x1A << 2);
1632 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
1633 if (ret_val)
1634 goto out;
1635 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xFE00);
1636 if (ret_val)
1637 goto out;
1638 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
1639 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
1640 data | (1 << 10));
1641 if (ret_val)
1642 goto out;
1643 } else {
1644 /* Write MAC register values back to h/w defaults */
1645 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
1646 mac_reg &= ~(0xF << 14);
1647 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
1649 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
1650 mac_reg &= ~E1000_RCTL_SECRC;
1651 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
1653 ret_val = e1000_read_kmrn_reg_generic(hw,
1654 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1655 &data);
1656 if (ret_val)
1657 goto out;
1658 ret_val = e1000_write_kmrn_reg_generic(hw,
1659 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1660 data & ~(1 << 0));
1661 if (ret_val)
1662 goto out;
1663 ret_val = e1000_read_kmrn_reg_generic(hw,
1664 E1000_KMRNCTRLSTA_HD_CTRL,
1665 &data);
1666 if (ret_val)
1667 goto out;
1668 data &= ~(0xF << 8);
1669 data |= (0xB << 8);
1670 ret_val = e1000_write_kmrn_reg_generic(hw,
1671 E1000_KMRNCTRLSTA_HD_CTRL, data);
1672 if (ret_val)
1673 goto out;
1675 /* Write PHY register values back to h/w defaults */
1676 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
1677 data &= ~(0x7F << 5);
1678 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
1679 if (ret_val)
1680 goto out;
1681 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
1682 data |= (1 << 13);
1683 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
1684 if (ret_val)
1685 goto out;
1686 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
1687 data &= ~(0x3FF << 2);
1688 data |= (0x8 << 2);
1689 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
1690 if (ret_val)
1691 goto out;
1692 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
1693 if (ret_val)
1694 goto out;
1695 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
1696 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
1697 data & ~(1 << 10));
1698 if (ret_val)
1699 goto out;
1702 /* re-enable Rx path after enabling/disabling workaround */
1703 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
1704 phy_reg & ~(1 << 14));
1706 out:
1707 return (ret_val);
1711 * e1000_lv_phy_workarounds_ich8lan - A series of PHY workarounds to be
1712 * done after every PHY reset.
1714 static s32
1715 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1717 s32 ret_val = E1000_SUCCESS;
1719 DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
1721 if (hw->mac.type != e1000_pch2lan)
1722 goto out;
1724 /* Set MDIO slow mode before any other MDIO access */
1725 ret_val = e1000_set_mdio_slow_mode_hv(hw);
1727 out:
1728 return (ret_val);
1732 * e1000_k1_workaround_lv - K1 Si workaround
1733 * @hw: pointer to the HW structure
1735 * Workaround to set the K1 beacon duration for 82579 parts
1737 static s32
1738 e1000_k1_workaround_lv(struct e1000_hw *hw)
1740 s32 ret_val = E1000_SUCCESS;
1741 u16 status_reg = 0;
1742 u32 mac_reg;
1744 DEBUGFUNC("e1000_k1_workaround_lv");
1746 if (hw->mac.type != e1000_pch2lan)
1747 goto out;
1749 /* Set K1 beacon duration based on 1Gbps speed or otherwise */
1750 ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
1751 if (ret_val)
1752 goto out;
1754 if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
1755 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
1756 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1757 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1759 if (status_reg & HV_M_STATUS_SPEED_1000)
1760 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1761 else
1762 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
1764 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1767 out:
1768 return (ret_val);
1772 * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
1773 * @hw: pointer to the HW structure
1774 * @gate: boolean set to true to gate, false to ungate
1776 * Gate/ungate the automatic PHY configuration via hardware; perform
1777 * the configuration via software instead.
1779 static void
1780 e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
1782 u32 extcnf_ctrl;
1784 DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
1786 if (hw->mac.type != e1000_pch2lan)
1787 return;
1789 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1791 if (gate)
1792 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1793 else
1794 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1796 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1800 * e1000_hv_phy_tuning_workaround_ich8lan - PHY tuning workaround needed for
1801 * Nahum3 + Hanksville testing, requested by the HW team
1803 static s32
1804 e1000_hv_phy_tuning_workaround_ich8lan(struct e1000_hw *hw)
1806 s32 ret_val = E1000_SUCCESS;
1808 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
1809 if (ret_val)
1810 goto out;
1812 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(770, 16), 0xA204);
1813 if (ret_val)
1814 goto out;
1816 ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x29, 0x66C0);
1817 if (ret_val)
1818 goto out;
1820 ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x1E, 0xFFFF);
1822 out:
1823 return (ret_val);
1827 * e1000_lan_init_done_ich8lan - Check for PHY config completion
1828 * @hw: pointer to the HW structure
1830 * Check the appropriate indication that the MAC has finished configuring
1831 * the PHY after a software reset.
1833 static void
1834 e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
1836 u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
1838 DEBUGFUNC("e1000_lan_init_done_ich8lan");
1840 /* Wait for basic configuration to complete before proceeding */
1841 do {
1842 data = E1000_READ_REG(hw, E1000_STATUS);
1843 data &= E1000_STATUS_LAN_INIT_DONE;
1844 usec_delay(100);
1845 } while ((!data) && --loop);
1848 * If basic configuration is incomplete before the above loop
1849 * count reaches 0, loading the configuration from NVM will
1850 * leave the PHY in a bad state, possibly resulting in no link.
1852 if (loop == 0) {
1853 /* EMPTY */
1854 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
1857 /* Clear the Init Done bit for the next init event */
1858 data = E1000_READ_REG(hw, E1000_STATUS);
1859 data &= ~E1000_STATUS_LAN_INIT_DONE;
1860 E1000_WRITE_REG(hw, E1000_STATUS, data);
1864 * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
1865 * @hw: pointer to the HW structure
1867 static s32
1868 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
1870 s32 ret_val = E1000_SUCCESS;
1871 u16 reg;
1873 DEBUGFUNC("e1000_post_phy_reset_ich8lan");
1875 if (hw->phy.ops.check_reset_block(hw))
1876 goto out;
1878 /* Allow time for h/w to get to quiescent state after reset */
1879 msec_delay(10);
1881 /* Perform any necessary post-reset workarounds */
1882 switch (hw->mac.type) {
1883 case e1000_pchlan:
1884 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
1885 if (ret_val)
1886 goto out;
1887 break;
1888 case e1000_pch2lan:
1889 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
1890 if (ret_val)
1891 goto out;
1892 break;
1893 default:
1894 break;
1897 if (hw->device_id == E1000_DEV_ID_ICH10_HANKSVILLE) {
1898 ret_val = e1000_hv_phy_tuning_workaround_ich8lan(hw);
1899 if (ret_val)
1900 goto out;
1903 /* Dummy read to clear the phy wakeup bit after lcd reset */
1904 if (hw->mac.type >= e1000_pchlan)
1905 hw->phy.ops.read_reg(hw, BM_WUC, &reg);
1907 /* Configure the LCD with the extended configuration region in NVM */
1908 ret_val = e1000_sw_lcd_config_ich8lan(hw);
1909 if (ret_val)
1910 goto out;
1912 /* Configure the LCD with the OEM bits in NVM */
1913 ret_val = e1000_oem_bits_config_ich8lan(hw, true);
1915 if (hw->mac.type == e1000_pch2lan) {
1916 /* Ungate automatic PHY configuration on non-managed 82579 */
1917 if (!(E1000_READ_REG(hw, E1000_FWSM) &
1918 E1000_ICH_FWSM_FW_VALID)) {
1919 msec_delay(10);
1920 e1000_gate_hw_phy_config_ich8lan(hw, false);
1923 /* Set EEE LPI Update Timer to 200usec */
1924 ret_val = hw->phy.ops.acquire(hw);
1925 if (ret_val)
1926 goto out;
1927 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
1928 I82579_LPI_UPDATE_TIMER);
1929 if (ret_val)
1930 goto release;
1931 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
1932 0x1387);
1933 release:
1934 hw->phy.ops.release(hw);
1937 out:
1938 return (ret_val);
1942 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
1943 * @hw: pointer to the HW structure
1945 * Resets the PHY
1946 * This is a function pointer entry point called by drivers
1947 * or other shared routines.
1949 static s32
1950 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
1952 s32 ret_val = E1000_SUCCESS;
1954 DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
1956 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
1957 if ((hw->mac.type == e1000_pch2lan) &&
1958 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
1959 e1000_gate_hw_phy_config_ich8lan(hw, true);
1961 ret_val = e1000_phy_hw_reset_generic(hw);
1962 if (ret_val)
1963 goto out;
1965 ret_val = e1000_post_phy_reset_ich8lan(hw);
1967 out:
1968 return (ret_val);
1973 * e1000_get_phy_info_ich8lan - Calls appropriate PHY type get_phy_info
1974 * @hw: pointer to the HW structure
1976 * Wrapper for calling the get_phy_info routines for the appropriate phy type.
1978 static s32
1979 e1000_get_phy_info_ich8lan(struct e1000_hw *hw)
1981 s32 ret_val = -E1000_ERR_PHY_TYPE;
1983 DEBUGFUNC("e1000_get_phy_info_ich8lan");
1985 switch (hw->phy.type) {
1986 case e1000_phy_ife:
1987 ret_val = e1000_get_phy_info_ife_ich8lan(hw);
1988 break;
1989 case e1000_phy_igp_3:
1990 case e1000_phy_bm:
1991 case e1000_phy_82578:
1992 case e1000_phy_82577:
1993 ret_val = e1000_get_phy_info_igp(hw);
1994 break;
1995 default:
1996 break;
1999 return (ret_val);
2003 * e1000_get_phy_info_ife_ich8lan - Retrieves various IFE PHY states
2004 * @hw: pointer to the HW structure
2006 * Populates "phy" structure with various feature states.
2007 * This function is only called by other family-specific
2008 * routines.
2010 static s32
2011 e1000_get_phy_info_ife_ich8lan(struct e1000_hw *hw)
2013 struct e1000_phy_info *phy = &hw->phy;
2014 s32 ret_val;
2015 u16 data;
2016 bool link;
2018 DEBUGFUNC("e1000_get_phy_info_ife_ich8lan");
2020 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
2021 if (ret_val)
2022 goto out;
2024 if (!link) {
2025 DEBUGOUT("Phy info is only valid if link is up\n");
2026 ret_val = -E1000_ERR_CONFIG;
2027 goto out;
2030 ret_val = phy->ops.read_reg(hw, IFE_PHY_SPECIAL_CONTROL, &data);
2031 if (ret_val)
2032 goto out;
2033 phy->polarity_correction = (data & IFE_PSC_AUTO_POLARITY_DISABLE)
2034 ? false : true;
2036 if (phy->polarity_correction) {
2037 ret_val = e1000_check_polarity_ife(hw);
2038 if (ret_val)
2039 goto out;
2040 } else {
2041 /* Polarity is forced */
2042 phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY)
2043 ? e1000_rev_polarity_reversed
2044 : e1000_rev_polarity_normal;
2047 ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
2048 if (ret_val)
2049 goto out;
2051 phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? true : false;
2053 /* The following parameters are undefined for 10/100 operation. */
2054 phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
2055 phy->local_rx = e1000_1000t_rx_status_undefined;
2056 phy->remote_rx = e1000_1000t_rx_status_undefined;
2058 out:
2059 return (ret_val);
2063 * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
2064 * @hw: pointer to the HW structure
2065 * @active: true to enable LPLU, false to disable
2067 * Sets the LPLU state according to the active flag. For PCH, if OEM write
2068  * bits are disabled in the NVM, writing the LPLU bits in the MAC will not set
2069 * the phy speed. This function will manually set the LPLU bit and restart
2070 * auto-neg as hw would do. D3 and D0 LPLU will call the same function
2071 * since it configures the same bit.
2073 static s32
2074 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
2076 s32 ret_val = E1000_SUCCESS;
2077 u16 oem_reg;
2079 DEBUGFUNC("e1000_set_lplu_state_pchlan");
2081 ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
2082 if (ret_val)
2083 goto out;
2085 if (active)
2086 oem_reg |= HV_OEM_BITS_LPLU;
2087 else
2088 oem_reg &= ~HV_OEM_BITS_LPLU;
2090 oem_reg |= HV_OEM_BITS_RESTART_AN;
2091 ret_val = hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
2093 out:
2094 return (ret_val);
2098 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
2099 * @hw: pointer to the HW structure
2100 * @active: true to enable LPLU, false to disable
2102 * Sets the LPLU D0 state according to the active flag. When
2103 * activating LPLU this function also disables smart speed
2104 * and vice versa. LPLU will not be activated unless the
2105  * device's autonegotiation advertisement is 10, 10/100, or
2106  * 10/100/1000 at all duplexes.
2107 * This is a function pointer entry point only called by
2108 * PHY setup routines.
2110 static s32
2111 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2113 struct e1000_phy_info *phy = &hw->phy;
2114 u32 phy_ctrl;
2115 s32 ret_val = E1000_SUCCESS;
2116 u16 data;
2118 DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
2120 if (phy->type == e1000_phy_ife)
2121 goto out;
2123 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
2125 if (active) {
2126 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
2127 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2129 if (phy->type != e1000_phy_igp_3)
2130 goto out;
2133 * Call gig speed drop workaround on LPLU before accessing any
2134 * PHY registers
2136 if (hw->mac.type == e1000_ich8lan)
2137 e1000_gig_downshift_workaround_ich8lan(hw);
2139 /* When LPLU is enabled, we should disable SmartSpeed */
2140 ret_val = phy->ops.read_reg(hw,
2141 IGP01E1000_PHY_PORT_CONFIG,
2142 &data);
2143 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2144 ret_val = phy->ops.write_reg(hw,
2145 IGP01E1000_PHY_PORT_CONFIG,
2146 data);
2147 if (ret_val)
2148 goto out;
2149 } else {
2150 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
2151 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2153 if (phy->type != e1000_phy_igp_3)
2154 goto out;
2157 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
2158 * during Dx states where the power conservation is most
2159 * important. During driver activity we should enable
2160 * SmartSpeed, so performance is maintained.
2162 if (phy->smart_speed == e1000_smart_speed_on) {
2163 ret_val = phy->ops.read_reg(hw,
2164 IGP01E1000_PHY_PORT_CONFIG,
2165 &data);
2166 if (ret_val)
2167 goto out;
2169 data |= IGP01E1000_PSCFR_SMART_SPEED;
2170 ret_val = phy->ops.write_reg(hw,
2171 IGP01E1000_PHY_PORT_CONFIG,
2172 data);
2173 if (ret_val)
2174 goto out;
2175 } else if (phy->smart_speed == e1000_smart_speed_off) {
2176 ret_val = phy->ops.read_reg(hw,
2177 IGP01E1000_PHY_PORT_CONFIG,
2178 &data);
2179 if (ret_val)
2180 goto out;
2182 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2183 ret_val = phy->ops.write_reg(hw,
2184 IGP01E1000_PHY_PORT_CONFIG,
2185 data);
2186 if (ret_val)
2187 goto out;
2191 out:
2192 return (ret_val);
2196 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
2197 * @hw: pointer to the HW structure
2198 * @active: true to enable LPLU, false to disable
2200 * Sets the LPLU D3 state according to the active flag. When
2201 * activating LPLU this function also disables smart speed
2202 * and vice versa. LPLU will not be activated unless the
2203  * device's autonegotiation advertisement is 10, 10/100, or
2204  * 10/100/1000 at all duplexes.
2205 * This is a function pointer entry point only called by
2206 * PHY setup routines.
2208 static s32
2209 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2211 struct e1000_phy_info *phy = &hw->phy;
2212 u32 phy_ctrl;
2213 s32 ret_val = E1000_SUCCESS;
2214 u16 data;
2216 DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
2218 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
2220 if (!active) {
2221 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
2222 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2224 if (phy->type != e1000_phy_igp_3)
2225 goto out;
2228 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
2229 * during Dx states where the power conservation is most
2230 * important. During driver activity we should enable
2231 * SmartSpeed, so performance is maintained.
2233 if (phy->smart_speed == e1000_smart_speed_on) {
2234 ret_val = phy->ops.read_reg(hw,
2235 IGP01E1000_PHY_PORT_CONFIG,
2236 &data);
2237 if (ret_val)
2238 goto out;
2240 data |= IGP01E1000_PSCFR_SMART_SPEED;
2241 ret_val = phy->ops.write_reg(hw,
2242 IGP01E1000_PHY_PORT_CONFIG,
2243 data);
2244 if (ret_val)
2245 goto out;
2246 } else if (phy->smart_speed == e1000_smart_speed_off) {
2247 ret_val = phy->ops.read_reg(hw,
2248 IGP01E1000_PHY_PORT_CONFIG,
2249 &data);
2250 if (ret_val)
2251 goto out;
2253 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2254 ret_val = phy->ops.write_reg(hw,
2255 IGP01E1000_PHY_PORT_CONFIG,
2256 data);
2257 if (ret_val)
2258 goto out;
2260 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
2261 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
2262 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
2263 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
2264 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2266 if (phy->type != e1000_phy_igp_3)
2267 goto out;
2270 * Call gig speed drop workaround on LPLU before accessing any
2271 * PHY registers
2273 if (hw->mac.type == e1000_ich8lan)
2274 e1000_gig_downshift_workaround_ich8lan(hw);
2276 /* When LPLU is enabled, we should disable SmartSpeed */
2277 ret_val = phy->ops.read_reg(hw,
2278 IGP01E1000_PHY_PORT_CONFIG,
2279 &data);
2280 if (ret_val)
2281 goto out;
2283 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2284 ret_val = phy->ops.write_reg(hw,
2285 IGP01E1000_PHY_PORT_CONFIG,
2286 data);
2289 out:
2290 return (ret_val);
2294 * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
2295 * @hw: pointer to the HW structure
2296 * @bank: pointer to the variable that returns the active bank
2298 * Reads signature byte from the NVM using the flash access registers.
2299 * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
2301 static s32
2302 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
2304 u32 eecd;
2305 struct e1000_nvm_info *nvm = &hw->nvm;
2306 u32 bank1_offset = nvm->flash_bank_size * sizeof (u16);
2307 u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
2308 u8 sig_byte = 0;
2309 s32 ret_val = E1000_SUCCESS;
2311 switch (hw->mac.type) {
2312 case e1000_ich8lan:
2313 case e1000_ich9lan:
2314 eecd = E1000_READ_REG(hw, E1000_EECD);
2315 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
2316 E1000_EECD_SEC1VAL_VALID_MASK) {
2317 if (eecd & E1000_EECD_SEC1VAL)
2318 *bank = 1;
2319 else
2320 *bank = 0;
2322 goto out;
2324 DEBUGOUT("Unable to determine valid NVM bank via EEC - "
2325 "reading flash signature\n");
2326 /* fall-thru */
2327 default:
2328 /* set bank to 0 in case flash read fails */
2329 *bank = 0;
2331 /* Check bank 0 */
2332 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
2333 &sig_byte);
2334 if (ret_val)
2335 goto out;
2336 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2337 E1000_ICH_NVM_SIG_VALUE) {
2338 *bank = 0;
2339 goto out;
2342 /* Check bank 1 */
2343 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
2344 bank1_offset, &sig_byte);
2345 if (ret_val)
2346 goto out;
2347 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2348 E1000_ICH_NVM_SIG_VALUE) {
2349 *bank = 1;
2350 goto out;
2353 DEBUGOUT("ERROR: No valid NVM bank present\n");
2354 ret_val = -E1000_ERR_NVM;
2355 break;
2357 out:
2358 return (ret_val);
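/*
 * Worked example (illustrative only, not driver code): with the signature in
 * word 0x13 as described above, act_offset = 0x13 * 2 + 1 = 0x27, the high
 * byte of that word in bank 0.  The same byte for bank 1 is read at 0x27 +
 * nvm->flash_bank_size * sizeof (u16) bytes into the flash, and a byte whose
 * two upper bits read 10b marks that bank's image as valid.
 */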
2362 * e1000_read_nvm_ich8lan - Read word(s) from the NVM
2363 * @hw: pointer to the HW structure
2364 * @offset: The offset (in bytes) of the word(s) to read.
2365 * @words: Size of data to read in words
2366 * @data: Pointer to the word(s) to read at offset.
2368 * Reads a word(s) from the NVM using the flash access registers.
2370 static s32
2371 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2372 u16 *data)
2374 struct e1000_nvm_info *nvm = &hw->nvm;
2375 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2376 u32 act_offset;
2377 s32 ret_val = E1000_SUCCESS;
2378 u32 bank = 0;
2379 u16 i, word;
2381 DEBUGFUNC("e1000_read_nvm_ich8lan");
2383 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2384 (words == 0)) {
2385 DEBUGOUT("nvm parameter(s) out of bounds\n");
2386 ret_val = -E1000_ERR_NVM;
2387 goto out;
2390 nvm->ops.acquire(hw);
2392 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2393 if (ret_val != E1000_SUCCESS) {
2394 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
2395 bank = 0;
2398 act_offset = (bank) ? nvm->flash_bank_size : 0;
2399 act_offset += offset;
2401 ret_val = E1000_SUCCESS;
2402 for (i = 0; i < words; i++) {
2403 if ((dev_spec->shadow_ram) &&
2404 (dev_spec->shadow_ram[offset + i].modified)) {
2405 data[i] = dev_spec->shadow_ram[offset + i].value;
2406 } else {
2407 ret_val = e1000_read_flash_word_ich8lan(hw,
2408 act_offset + i,
2409 &word);
2410 if (ret_val)
2411 break;
2412 data[i] = word;
2416 nvm->ops.release(hw);
2418 out:
2419 if (ret_val) {
2420 /* EMPTY */
2421 DEBUGOUT1("NVM read error: %d\n", ret_val);
2424 return (ret_val);
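/*
 * Worked example (illustrative only, not driver code): offsets here are in
 * words.  With bank 1 active, a request for word 0x0A is satisfied from
 * flash word (nvm->flash_bank_size + 0x0A) -- unless
 * shadow_ram[0x0A].modified is set, in which case the cached shadow-RAM
 * value is returned without touching the flash.
 */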
2428 * e1000_flash_cycle_init_ich8lan - Initialize flash
2429 * @hw: pointer to the HW structure
2431 * This function does initial flash setup so that a new read/write/erase cycle
2432 * can be started.
2434 static s32
2435 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2437 union ich8_hws_flash_status hsfsts;
2438 s32 ret_val = -E1000_ERR_NVM;
2439 s32 i = 0;
2441 DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
2443 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2445 /* Check if the flash descriptor is valid */
2446 if (hsfsts.hsf_status.fldesvalid == 0) {
2447 DEBUGOUT("Flash descriptor invalid. "
2448 "SW Sequencing must be used.");
2449 goto out;
2452 /* Clear FCERR and DAEL in hw status by writing 1 */
2453 hsfsts.hsf_status.flcerr = 1;
2454 hsfsts.hsf_status.dael = 1;
2456 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2459 * Either we should have a hardware SPI cycle in progress bit to check
2460 * against, in order to start a new cycle or FDONE bit should be
2461 * changed in the hardware so that it is 1 after hardware reset, which
2462 * can then be used as an indication whether a cycle is in progress or
2463 * has been completed.
2466 if (hsfsts.hsf_status.flcinprog == 0) {
2468 * There is no cycle running at present, so we can start a
2469 * cycle. Begin by setting Flash Cycle Done.
2471 hsfsts.hsf_status.flcdone = 1;
2472 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2473 ret_val = E1000_SUCCESS;
2474 } else {
2476 		 * Otherwise poll for some time so the current cycle has a
2477 * chance to end before giving up.
2479 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
2480 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
2481 ICH_FLASH_HSFSTS);
2482 if (hsfsts.hsf_status.flcinprog == 0) {
2483 ret_val = E1000_SUCCESS;
2484 break;
2486 usec_delay(1);
2488 if (ret_val == E1000_SUCCESS) {
2490 			 * The previous cycle completed within the timeout;
2491 			 * now set the Flash Cycle Done.
2493 hsfsts.hsf_status.flcdone = 1;
2494 E1000_WRITE_FLASH_REG16(hw,
2495 ICH_FLASH_HSFSTS,
2496 hsfsts.regval);
2497 } else {
2498 /* EMPTY */
2499 DEBUGOUT("Flash controller busy, cannot get access");
2503 out:
2504 return (ret_val);
2508 * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
2509 * @hw: pointer to the HW structure
2510 * @timeout: maximum time to wait for completion
2512 * This function starts a flash cycle and waits for its completion.
2514 static s32
2515 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
2517 union ich8_hws_flash_ctrl hsflctl;
2518 union ich8_hws_flash_status hsfsts;
2519 s32 ret_val = -E1000_ERR_NVM;
2520 u32 i = 0;
2522 DEBUGFUNC("e1000_flash_cycle_ich8lan");
2524 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
2525 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2526 hsflctl.hsf_ctrl.flcgo = 1;
2527 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2529 /* wait till FDONE bit is set to 1 */
2530 do {
2531 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2532 if (hsfsts.hsf_status.flcdone == 1)
2533 break;
2534 usec_delay(1);
2535 } while (i++ < timeout);
2537 if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0)
2538 ret_val = E1000_SUCCESS;
2540 return (ret_val);
2544 * e1000_read_flash_word_ich8lan - Read word from flash
2545 * @hw: pointer to the HW structure
2546 * @offset: offset to data location
2547 * @data: pointer to the location for storing the data
2549 * Reads the flash word at offset into data. Offset is converted
2550 * to bytes before read.
2552 static s32
2553 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, u16 *data)
2555 s32 ret_val;
2557 DEBUGFUNC("e1000_read_flash_word_ich8lan");
2559 if (!data) {
2560 ret_val = -E1000_ERR_NVM;
2561 goto out;
2564 /* Must convert offset into bytes. */
2565 offset <<= 1;
2567 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 2, data);
2569 out:
2570 return (ret_val);
2574 * e1000_read_flash_byte_ich8lan - Read byte from flash
2575 * @hw: pointer to the HW structure
2576 * @offset: The offset of the byte to read.
2577 * @data: Pointer to a byte to store the value read.
2579 * Reads a single byte from the NVM using the flash access registers.
2581 static s32
2582 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, u8 *data)
2584 s32 ret_val = E1000_SUCCESS;
2585 u16 word = 0;
2587 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
2588 if (ret_val)
2589 goto out;
2591 *data = (u8)word;
2593 out:
2594 return (ret_val);
2598 * e1000_read_flash_data_ich8lan - Read byte or word from NVM
2599 * @hw: pointer to the HW structure
2600 * @offset: The offset (in bytes) of the byte or word to read.
2601 * @size: Size of data to read, 1=byte 2=word
2602 * @data: Pointer to the word to store the value read.
2604 * Reads a byte or word from the NVM using the flash access registers.
2606 static s32
2607 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2608 u8 size, u16 *data)
2610 union ich8_hws_flash_status hsfsts;
2611 union ich8_hws_flash_ctrl hsflctl;
2612 u32 flash_linear_addr;
2613 u32 flash_data = 0;
2614 s32 ret_val = -E1000_ERR_NVM;
2615 u8 count = 0;
2617 DEBUGFUNC("e1000_read_flash_data_ich8lan");
2619 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
2620 goto out;
2622 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2623 hw->nvm.flash_base_addr;
2625 do {
2626 usec_delay(1);
2627 /* Steps */
2628 ret_val = e1000_flash_cycle_init_ich8lan(hw);
2629 if (ret_val != E1000_SUCCESS)
2630 break;
2632 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2633 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2634 hsflctl.hsf_ctrl.fldbcount = size - 1;
2635 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
2636 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2638 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
2640 ret_val = e1000_flash_cycle_ich8lan(hw,
2641 ICH_FLASH_READ_COMMAND_TIMEOUT);
2644 		 * Check if FCERR is set to 1; if so, clear it and try
2645 		 * the whole sequence a few more times.  Otherwise read
2646 		 * the data out of the Flash Data0 register (returned
2647 		 * least significant byte first).
2649 if (ret_val == E1000_SUCCESS) {
2650 flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
2651 if (size == 1)
2652 *data = (u8)(flash_data & 0x000000FF);
2653 else if (size == 2)
2654 *data = (u16)(flash_data & 0x0000FFFF);
2655 break;
2656 } else {
2658 * If we've gotten here, then things are probably
2659 * completely hosed, but if the error condition is
2660 * detected, it won't hurt to give it another try...
2661 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
2663 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
2664 ICH_FLASH_HSFSTS);
2665 if (hsfsts.hsf_status.flcerr == 1) {
2666 /* Repeat for some time before giving up. */
2667 continue;
2668 } else if (hsfsts.hsf_status.flcdone == 0) {
2669 DEBUGOUT("Timeout error - flash cycle "
2670 "did not complete.");
2671 break;
2674 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2676 out:
2677 return (ret_val);
2681 * e1000_write_nvm_ich8lan - Write word(s) to the NVM
2682 * @hw: pointer to the HW structure
2683 * @offset: The offset (in bytes) of the word(s) to write.
2684 * @words: Size of data to write in words
2685 * @data: Pointer to the word(s) to write at offset.
2687 * Writes a byte or word to the NVM using the flash access registers.
2689 static s32
2690 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
2692 struct e1000_nvm_info *nvm = &hw->nvm;
2693 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2694 s32 ret_val = E1000_SUCCESS;
2695 u16 i;
2697 DEBUGFUNC("e1000_write_nvm_ich8lan");
2699 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2700 (words == 0)) {
2701 DEBUGOUT("nvm parameter(s) out of bounds\n");
2702 ret_val = -E1000_ERR_NVM;
2703 goto out;
2706 nvm->ops.acquire(hw);
2708 for (i = 0; i < words; i++) {
2709 dev_spec->shadow_ram[offset + i].modified = true;
2710 dev_spec->shadow_ram[offset + i].value = data[i];
2713 nvm->ops.release(hw);
2715 out:
2716 return (ret_val);
2720 * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
2721 * @hw: pointer to the HW structure
2723 * The NVM checksum is updated by calling the generic update_nvm_checksum,
2724 * which writes the checksum to the shadow ram. The changes in the shadow
2725  * ram are then committed to the EEPROM by processing one bank at a time,
2726  * checking for the modified bit and writing only the pending changes.
2727 * After a successful commit, the shadow ram is cleared and is ready for
2728 * future writes.
2730 static s32
2731 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2733 struct e1000_nvm_info *nvm = &hw->nvm;
2734 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2735 u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
2736 s32 ret_val;
2737 u16 data;
2739 DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
2741 ret_val = e1000_update_nvm_checksum_generic(hw);
2742 if (ret_val)
2743 goto out;
2745 if (nvm->type != e1000_nvm_flash_sw)
2746 goto out;
2748 nvm->ops.acquire(hw);
2751 * We're writing to the opposite bank so if we're on bank 1, write to
2752 * bank 0 etc. We also need to erase the segment that is going to be
2753 * written
2755 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2756 if (ret_val != E1000_SUCCESS) {
2757 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
2758 bank = 0;
2761 if (bank == 0) {
2762 new_bank_offset = nvm->flash_bank_size;
2763 old_bank_offset = 0;
2764 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
2765 if (ret_val) {
2766 nvm->ops.release(hw);
2767 goto out;
2769 } else {
2770 old_bank_offset = nvm->flash_bank_size;
2771 new_bank_offset = 0;
2772 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
2773 if (ret_val) {
2774 nvm->ops.release(hw);
2775 goto out;
2779 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
2781 * Determine whether to write the value stored in the other
2782 * NVM bank or a modified value stored in the shadow RAM
2784 if (dev_spec->shadow_ram[i].modified) {
2785 data = dev_spec->shadow_ram[i].value;
2786 } else {
2787 ret_val = e1000_read_flash_word_ich8lan(hw,
2788 i + old_bank_offset,
2789 &data);
2790 if (ret_val)
2791 break;
2795 		 * If this is the signature word (0x13), make sure the signature bits
2796 * (15:14) are 11b until the commit has completed. This will
2797 * allow us to write 10b which indicates the signature is
2798 * valid. We want to do this after the write has completed so
2799 * that we don't mark the segment valid while the write is
2800 * still in progress
2802 if (i == E1000_ICH_NVM_SIG_WORD)
2803 data |= E1000_ICH_NVM_SIG_MASK;
2805 /* Convert offset to bytes. */
2806 act_offset = (i + new_bank_offset) << 1;
2808 usec_delay(100);
2809 /* Write the bytes to the new bank. */
2810 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2811 act_offset,
2812 (u8)data);
2813 if (ret_val)
2814 break;
2816 usec_delay(100);
2817 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2818 act_offset + 1,
2819 (u8)(data >> 8));
2820 if (ret_val)
2821 break;
2825 * Don't bother writing the segment valid bits if sector programming
2826 * failed.
2828 if (ret_val) {
2829 DEBUGOUT("Flash commit failed.\n");
2830 nvm->ops.release(hw);
2831 goto out;
2835 	 * Finally validate the new segment by setting bits 15:14 to 10b in
2836 	 * word 0x13; this can be done without an erase as well since these
2837 	 * bits are 11b to start with and we need only change bit 14 to 0b
2839 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
2840 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
2841 if (ret_val) {
2842 nvm->ops.release(hw);
2843 goto out;
2846 data &= 0xBFFF;
2847 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2848 act_offset * 2 + 1,
2849 (u8)(data >> 8));
2850 if (ret_val) {
2851 nvm->ops.release(hw);
2852 goto out;
2856 * And invalidate the previously valid segment by setting its
2857 * signature word (0x13) high_byte to 0b. This can be done without an
2858 	 * erase because flash erase sets all bits to 1's; bits can be
2859 	 * programmed from 1 to 0 without an erase
2861 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
2862 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
2863 if (ret_val) {
2864 nvm->ops.release(hw);
2865 goto out;
2868 /* Great! Everything worked, we can now clear the cached entries. */
2869 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
2870 dev_spec->shadow_ram[i].modified = false;
2871 dev_spec->shadow_ram[i].value = 0xFFFF;
2874 nvm->ops.release(hw);
2877 * Reload the EEPROM, or else modifications will not appear until
2878 * after the next adapter reset.
2880 nvm->ops.reload(hw);
2881 msec_delay(10);
2883 out:
2884 if (ret_val) {
2885 /* EMPTY */
2886 DEBUGOUT1("NVM update error: %d\n", ret_val);
2889 return (ret_val);
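/*
 * Worked example (illustrative only, not driver code): if bank 0 holds the
 * current image, the commit above writes into bank 1, so word i lands at
 * byte offset (i + nvm->flash_bank_size) << 1 (low byte) and that value + 1
 * (high byte).  Once every word is written, bits 15:14 of word 0x13 in the
 * new bank are changed from 11b to 10b to mark it valid, and the high byte
 * of word 0x13 in the old bank is programmed to 0 to invalidate it.
 */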
2893 * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
2894 * @hw: pointer to the HW structure
2896 * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
2897  * If the bit is 0, the EEPROM has been modified but the checksum was not
2898 * calculated, in which case we need to calculate the checksum and set bit 6.
2900 static s32
2901 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
2903 s32 ret_val = E1000_SUCCESS;
2904 u16 data;
2906 DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
2909 * Read 0x19 and check bit 6. If this bit is 0, the checksum needs to
2910 * be fixed. This bit is an indication that the NVM was prepared by
2911 * OEM software and did not calculate the checksum...a likely
2912 * scenario.
2914 ret_val = hw->nvm.ops.read(hw, 0x19, 1, &data);
2915 if (ret_val)
2916 goto out;
2918 if ((data & 0x40) == 0) {
2919 data |= 0x40;
2920 ret_val = hw->nvm.ops.write(hw, 0x19, 1, &data);
2921 if (ret_val)
2922 goto out;
2923 ret_val = hw->nvm.ops.update(hw);
2924 if (ret_val)
2925 goto out;
2928 ret_val = e1000_validate_nvm_checksum_generic(hw);
2930 out:
2931 return (ret_val);
2935 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
2936 * @hw: pointer to the HW structure
2937  * @offset: The offset (in bytes) of the byte/word to write.
2938  * @size: Size of data to write, 1=byte 2=word
2939 * @data: The byte(s) to write to the NVM.
2941 * Writes one/two bytes to the NVM using the flash access registers.
2943 static s32
2944 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2945 u8 size, u16 data)
2947 union ich8_hws_flash_status hsfsts;
2948 union ich8_hws_flash_ctrl hsflctl;
2949 u32 flash_linear_addr;
2950 u32 flash_data = 0;
2951 s32 ret_val = -E1000_ERR_NVM;
2952 u8 count = 0;
2954 DEBUGFUNC("e1000_write_ich8_data");
2956 if (size < 1 || size > 2 || data > size * 0xff ||
2957 offset > ICH_FLASH_LINEAR_ADDR_MASK)
2958 goto out;
2960 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2961 hw->nvm.flash_base_addr;
2963 do {
2964 usec_delay(1);
2965 /* Steps */
2966 ret_val = e1000_flash_cycle_init_ich8lan(hw);
2967 if (ret_val != E1000_SUCCESS)
2968 break;
2970 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2971 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2972 hsflctl.hsf_ctrl.fldbcount = size - 1;
2973 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
2974 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2976 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
2978 if (size == 1)
2979 flash_data = (u32)data & 0x00FF;
2980 else
2981 flash_data = (u32)data;
2983 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
2986 		 * Check if FCERR is set to 1; if so, clear it and try
2987 		 * the whole sequence a few more times, else we are done
2989 ret_val = e1000_flash_cycle_ich8lan(hw,
2990 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
2991 if (ret_val == E1000_SUCCESS)
2992 break;
2995 * If we're here, then things are most likely
2996 * completely hosed, but if the error condition is
2997 * detected, it won't hurt to give it another
2998 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
3000 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3001 if (hsfsts.hsf_status.flcerr == 1) {
3002 /* Repeat for some time before giving up. */
3003 continue;
3004 } else if (hsfsts.hsf_status.flcdone == 0) {
3005 DEBUGOUT("Timeout error - flash cycle "
3006 "did not complete.");
3007 break;
3009 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3011 out:
3012 return (ret_val);
3016 * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
3017 * @hw: pointer to the HW structure
3018  * @offset: The offset of the byte to write.
3019 * @data: The byte to write to the NVM.
3021 * Writes a single byte to the NVM using the flash access registers.
3023 static s32
3024 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, u8 data)
3026 u16 word = (u16)data;
3028 DEBUGFUNC("e1000_write_flash_byte_ich8lan");
3030 return (e1000_write_flash_data_ich8lan(hw, offset, 1, word));
3034 * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
3035 * @hw: pointer to the HW structure
3036 * @offset: The offset of the byte to write.
3037 * @byte: The byte to write to the NVM.
3039 * Writes a single byte to the NVM using the flash access registers.
3040 * Goes through a retry algorithm before giving up.
3042 static s32
3043 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, u8 byte)
3045 s32 ret_val;
3046 u16 program_retries;
3048 DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
3050 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3051 if (ret_val == E1000_SUCCESS)
3052 goto out;
3054 for (program_retries = 0; program_retries < 100; program_retries++) {
3055 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
3056 usec_delay(100);
3057 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3058 if (ret_val == E1000_SUCCESS)
3059 break;
3061 if (program_retries == 100) {
3062 ret_val = -E1000_ERR_NVM;
3063 goto out;
3066 out:
3067 return (ret_val);
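/*
 * Worked arithmetic (illustrative only, not driver code): with up to 100
 * retries and a 100 usec delay before each attempt, the retry loop above
 * bounds the extra wait for a stubborn byte at roughly 100 * 100 usec =
 * 10 msec, on top of the per-attempt flash cycle timeout.
 */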
3071 * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
3072 * @hw: pointer to the HW structure
3073 * @bank: 0 for first bank, 1 for second bank, etc.
3075 * Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
3076 * bank N is 4096 * N + flash_reg_addr.
3078 static s32
3079 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
3081 struct e1000_nvm_info *nvm = &hw->nvm;
3082 union ich8_hws_flash_status hsfsts;
3083 union ich8_hws_flash_ctrl hsflctl;
3084 u32 flash_linear_addr;
3086 /* bank size is in 16bit words - adjust to bytes */
3087 u32 flash_bank_size = nvm->flash_bank_size * 2;
3088 s32 ret_val = E1000_SUCCESS;
3089 s32 count = 0;
3090 s32 j, iteration, sector_size;
3092 DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
3094 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3097 * Determine HW Sector size: Read BERASE bits of hw flash status
3098 * register
3099 * 00: The Hw sector is 256 bytes, hence we need to erase 16
3100 * consecutive sectors. The start index for the nth Hw sector
3101 * can be calculated as = bank * 4096 + n * 256
3102 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
3103 * The start index for the nth Hw sector can be calculated
3104 * as = bank * 4096
3105 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
3106 * (ich9 only, otherwise error condition)
3107 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
3109 switch (hsfsts.hsf_status.berasesz) {
3110 case 0:
3111 /* Hw sector size 256 */
3112 sector_size = ICH_FLASH_SEG_SIZE_256;
3113 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
3114 break;
3115 case 1:
3116 sector_size = ICH_FLASH_SEG_SIZE_4K;
3117 iteration = 1;
3118 break;
3119 case 2:
3120 sector_size = ICH_FLASH_SEG_SIZE_8K;
3121 iteration = 1;
3122 break;
3123 case 3:
3124 sector_size = ICH_FLASH_SEG_SIZE_64K;
3125 iteration = 1;
3126 break;
3127 default:
3128 ret_val = -E1000_ERR_NVM;
3129 goto out;
3132 /* Start with the base address, then add the sector offset. */
3133 flash_linear_addr = hw->nvm.flash_base_addr;
3134 flash_linear_addr += (bank) ? flash_bank_size : 0;
3136 for (j = 0; j < iteration; j++) {
3137 do {
3138 /* Steps */
3139 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3140 if (ret_val)
3141 goto out;
3144 * Write a value 11 (block Erase) in Flash Cycle field
3145 * in hw flash control
3147 hsflctl.regval = E1000_READ_FLASH_REG16(hw,
3148 ICH_FLASH_HSFCTL);
3149 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
3150 E1000_WRITE_FLASH_REG16(hw,
3151 ICH_FLASH_HSFCTL,
3152 hsflctl.regval);
3155 * Write the last 24 bits of an index within the block
3156 * into Flash Linear address field in Flash Address.
3158 flash_linear_addr += (j * sector_size);
3159 E1000_WRITE_FLASH_REG(hw,
3160 ICH_FLASH_FADDR,
3161 flash_linear_addr);
3163 ret_val = e1000_flash_cycle_ich8lan(hw,
3164 ICH_FLASH_ERASE_COMMAND_TIMEOUT);
3165 if (ret_val == E1000_SUCCESS)
3166 break;
3169 * Check if FCERR is set to 1. If 1,
3170 * clear it and try the whole sequence
3171 			 * a few more times, else we are done
3173 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3174 ICH_FLASH_HSFSTS);
3175 if (hsfsts.hsf_status.flcerr == 1)
3176 /* repeat for some time before giving up */
3177 continue;
3178 else if (hsfsts.hsf_status.flcdone == 0)
3179 goto out;
3180 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
3183 out:
3184 return (ret_val);
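/*
 * Worked example (illustrative only, not driver code): flash_bank_size above
 * is adjusted to bytes, so a 4 KB bank is nvm->flash_bank_size * 2 = 4096
 * bytes.  With BERASE = 00b (256-byte sectors) that bank is erased in
 * 4096 / 256 = 16 iterations; with BERASE = 01b (4 KB sectors) a single
 * erase cycle at the bank's base address suffices.
 */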
3188 * e1000_valid_led_default_ich8lan - Set the default LED settings
3189 * @hw: pointer to the HW structure
3190 * @data: Pointer to the LED settings
3192 * Reads the LED default settings from the NVM to data. If the NVM LED
3193  * settings are all 0's or F's, set the LED default to a valid LED default
3194 * setting.
3196 static s32
3197 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
3199 s32 ret_val;
3201 DEBUGFUNC("e1000_valid_led_default_ich8lan");
3203 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
3204 if (ret_val) {
3205 DEBUGOUT("NVM Read Error\n");
3206 goto out;
3209 if (*data == ID_LED_RESERVED_0000 ||
3210 *data == ID_LED_RESERVED_FFFF)
3211 *data = ID_LED_DEFAULT_ICH8LAN;
3213 out:
3214 return (ret_val);
3218 * e1000_id_led_init_pchlan - store LED configurations
3219 * @hw: pointer to the HW structure
3221 * PCH does not control LEDs via the LEDCTL register, rather it uses
3222 * the PHY LED configuration register.
3224 * PCH also does not have an "always on" or "always off" mode which
3225 * complicates the ID feature. Instead of using the "on" mode to indicate
3226 * in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
3227 * use "link_up" mode. The LEDs will still ID on request if there is no
3228 * link based on logic in e1000_led_[on|off]_pchlan().
3230 static s32
3231 e1000_id_led_init_pchlan(struct e1000_hw *hw)
3233 struct e1000_mac_info *mac = &hw->mac;
3234 s32 ret_val;
3235 const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
3236 const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
3237 u16 data, i, temp, shift;
3239 DEBUGFUNC("e1000_id_led_init_pchlan");
3241 /* Get default ID LED modes */
3242 ret_val = hw->nvm.ops.valid_led_default(hw, &data);
3243 if (ret_val)
3244 goto out;
3246 mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
3247 mac->ledctl_mode1 = mac->ledctl_default;
3248 mac->ledctl_mode2 = mac->ledctl_default;
3250 for (i = 0; i < 4; i++) {
3251 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
3252 shift = (i * 5);
3253 switch (temp) {
3254 case ID_LED_ON1_DEF2:
3255 case ID_LED_ON1_ON2:
3256 case ID_LED_ON1_OFF2:
3257 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3258 mac->ledctl_mode1 |= (ledctl_on << shift);
3259 break;
3260 case ID_LED_OFF1_DEF2:
3261 case ID_LED_OFF1_ON2:
3262 case ID_LED_OFF1_OFF2:
3263 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3264 mac->ledctl_mode1 |= (ledctl_off << shift);
3265 break;
3266 default:
3267 /* Do nothing */
3268 break;
3270 switch (temp) {
3271 case ID_LED_DEF1_ON2:
3272 case ID_LED_ON1_ON2:
3273 case ID_LED_OFF1_ON2:
3274 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3275 mac->ledctl_mode2 |= (ledctl_on << shift);
3276 break;
3277 case ID_LED_DEF1_OFF2:
3278 case ID_LED_ON1_OFF2:
3279 case ID_LED_OFF1_OFF2:
3280 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3281 mac->ledctl_mode2 |= (ledctl_off << shift);
3282 break;
3283 default:
3284 /* Do nothing */
3285 break;
3289 out:
3290 return (ret_val);
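/*
 * Worked example (illustrative only, not driver code): the NVM ID LED word
 * packs one 4-bit mode per LED, selected above with
 * (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK, while the PHY LED
 * configuration uses 5-bit fields, hence shift = i * 5.  LED 2, for
 * instance, is decoded from bits 11:8 of the NVM word and programmed into
 * bits 14:10 of ledctl_mode1/ledctl_mode2.
 */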
3294 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
3295 * @hw: pointer to the HW structure
3297  * ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
3298  * register, so the bus width is hard coded.
3300 static s32
3301 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
3303 struct e1000_bus_info *bus = &hw->bus;
3304 s32 ret_val;
3306 DEBUGFUNC("e1000_get_bus_info_ich8lan");
3308 ret_val = e1000_get_bus_info_pcie_generic(hw);
3311 * ICH devices are "PCI Express"-ish. They have a configuration
3312 * space, but do not contain PCI Express Capability registers, so bus
3313 * width must be hardcoded.
3315 if (bus->width == e1000_bus_width_unknown)
3316 bus->width = e1000_bus_width_pcie_x1;
3318 return (ret_val);
3322 * e1000_reset_hw_ich8lan - Reset the hardware
3323 * @hw: pointer to the HW structure
3325 * Does a full reset of the hardware which includes a reset of the PHY and
3326 * MAC.
3328 static s32
3329 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3331 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3332 u16 reg;
3333 u32 ctrl, kab;
3334 s32 ret_val;
3336 DEBUGFUNC("e1000_reset_hw_ich8lan");
3339 * Prevent the PCI-E bus from sticking if there is no TLP connection
3340 * on the last TLP read/write transaction when MAC is reset.
3342 ret_val = e1000_disable_pcie_master_generic(hw);
3343 if (ret_val)
3344 DEBUGOUT("PCI-E Master disable polling has failed.\n");
3346 DEBUGOUT("Masking off all interrupts\n");
3347 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3350 * Disable the Transmit and Receive units. Then delay to allow any
3351 * pending transactions to complete before we hit the MAC with the
3352 * global reset.
3354 E1000_WRITE_REG(hw, E1000_RCTL, 0);
3355 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
3356 E1000_WRITE_FLUSH(hw);
3358 msec_delay(10);
3360 /* Workaround for ICH8 bit corruption issue in FIFO memory */
3361 if (hw->mac.type == e1000_ich8lan) {
3362 /* Set Tx and Rx buffer allocation to 8k apiece. */
3363 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
3364 /* Set Packet Buffer Size to 16k. */
3365 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
3368 if (hw->mac.type == e1000_pchlan) {
3369 /* Save the NVM K1 bit setting */
3370 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &reg);
3371 if (ret_val)
3372 return (ret_val);
3374 if (reg & E1000_NVM_K1_ENABLE)
3375 dev_spec->nvm_k1_enabled = true;
3376 else
3377 dev_spec->nvm_k1_enabled = false;
3380 ctrl = E1000_READ_REG(hw, E1000_CTRL);
3382 if (!hw->phy.ops.check_reset_block(hw)) {
3384 * Full-chip reset requires MAC and PHY reset at the same
3385 * time to make sure the interface between MAC and the
3386 * external PHY is reset.
3388 ctrl |= E1000_CTRL_PHY_RST;
3391 * Gate automatic PHY configuration by hardware on
3392 * non-managed 82579
3394 if ((hw->mac.type == e1000_pch2lan) &&
3395 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3396 e1000_gate_hw_phy_config_ich8lan(hw, true);
3398 ret_val = e1000_acquire_swflag_ich8lan(hw);
3399 DEBUGOUT("Issuing a global reset to ich8lan\n");
3400 E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
3401 msec_delay(20);
3403 if (!ret_val)
3404 e1000_release_swflag_ich8lan(hw);
3406 if (ctrl & E1000_CTRL_PHY_RST) {
3407 ret_val = hw->phy.ops.get_cfg_done(hw);
3408 if (ret_val)
3409 goto out;
3411 ret_val = e1000_post_phy_reset_ich8lan(hw);
3412 if (ret_val)
3413 goto out;
3417 * For PCH, this write will make sure that any noise
3418 * will be detected as a CRC error and be dropped rather than show up
3419 * as a bad packet to the DMA engine.
3421 if (hw->mac.type == e1000_pchlan)
3422 E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
3424 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3425 (void) E1000_READ_REG(hw, E1000_ICR);
3427 kab = E1000_READ_REG(hw, E1000_KABGTXD);
3428 kab |= E1000_KABGTXD_BGSQLBIAS;
3429 E1000_WRITE_REG(hw, E1000_KABGTXD, kab);
3431 out:
3432 return (ret_val);
3437 * e1000_init_hw_ich8lan - Initialize the hardware
3438 * @hw: pointer to the HW structure
3440 * Prepares the hardware for transmit and receive by doing the following:
3441 * - initialize hardware bits
3442 * - initialize LED identification
3443 * - setup receive address registers
3444 * - setup flow control
3445 * - setup transmit descriptors
3446 * - clear statistics
3448 static s32
3449 e1000_init_hw_ich8lan(struct e1000_hw *hw)
3451 struct e1000_mac_info *mac = &hw->mac;
3452 u32 ctrl_ext, txdctl, snoop;
3453 s32 ret_val;
3454 u16 i;
3456 DEBUGFUNC("e1000_init_hw_ich8lan");
3458 e1000_initialize_hw_bits_ich8lan(hw);
3460 /* Initialize identification LED */
3461 ret_val = mac->ops.id_led_init(hw);
3462 if (ret_val) {
3463 /* EMPTY */
3464 /* This is not fatal and we should not stop init due to this */
3465 DEBUGOUT("Error initializing identification LED\n");
3468 /* Setup the receive address. */
3469 e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
3471 /* Zero out the Multicast HASH table */
3472 DEBUGOUT("Zeroing the MTA\n");
3473 for (i = 0; i < mac->mta_reg_count; i++)
3474 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
3477 	 * The 82578 Rx buffer will stall if wakeup is enabled in the host and
3478 * the ME. Reading the BM_WUC register will clear the host wakeup bit.
3479 * Reset the phy after disabling host wakeup to reset the Rx buffer.
3481 if (hw->phy.type == e1000_phy_82578) {
3482 hw->phy.ops.read_reg(hw, BM_WUC, &i);
3483 ret_val = e1000_phy_hw_reset_ich8lan(hw);
3484 if (ret_val)
3485 return (ret_val);
3488 /* Setup link and flow control */
3489 ret_val = mac->ops.setup_link(hw);
3491 /* Set the transmit descriptor write-back policy for both queues */
3492 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
3493 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3494 E1000_TXDCTL_FULL_TX_DESC_WB;
3495 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3496 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3497 E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
3498 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
3499 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3500 E1000_TXDCTL_FULL_TX_DESC_WB;
3501 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3502 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3503 E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
3506 * ICH8 has opposite polarity of no_snoop bits. By default, we should
3507 * use snoop behavior.
3509 if (mac->type == e1000_ich8lan)
3510 snoop = PCIE_ICH8_SNOOP_ALL;
3511 else
3512 snoop = (u32)~(PCIE_NO_SNOOP_ALL);
3513 e1000_set_pcie_no_snoop_generic(hw, snoop);
3515 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
3516 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
3517 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
3520 * Clear all of the statistics registers (clear on read). It is
3521 * important that we do this after we have tried to establish link
3522 * because the symbol error count will increment wildly if there
3523 * is no link.
3525 e1000_clear_hw_cntrs_ich8lan(hw);
3527 return (ret_val);
3531 * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
3532 * @hw: pointer to the HW structure
3534  * Sets/Clears the hardware bits necessary for correctly setting up the
3535 * hardware for transmit and receive.
3537 static void
3538 e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
3540 u32 reg;
3542 DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
3544 /* Extended Device Control */
3545 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
3546 reg |= (1 << 22);
3547 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
3548 if (hw->mac.type >= e1000_pchlan)
3549 reg |= E1000_CTRL_EXT_PHYPDEN;
3550 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
3552 /* Transmit Descriptor Control 0 */
3553 reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
3554 reg |= (1 << 22);
3555 E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
3557 /* Transmit Descriptor Control 1 */
3558 reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
3559 reg |= (1 << 22);
3560 E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
3562 /* Transmit Arbitration Control 0 */
3563 reg = E1000_READ_REG(hw, E1000_TARC(0));
3564 if (hw->mac.type == e1000_ich8lan)
3565 reg |= (1 << 28) | (1 << 29);
3566 reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
3567 E1000_WRITE_REG(hw, E1000_TARC(0), reg);
3569 /* Transmit Arbitration Control 1 */
3570 reg = E1000_READ_REG(hw, E1000_TARC(1));
3571 if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
3572 reg &= ~(1 << 28);
3573 else
3574 reg |= (1 << 28);
3575 reg |= (1 << 24) | (1 << 26) | (1 << 30);
3576 E1000_WRITE_REG(hw, E1000_TARC(1), reg);
3578 /* Device Status */
3579 if (hw->mac.type == e1000_ich8lan) {
3580 reg = E1000_READ_REG(hw, E1000_STATUS);
3581 reg &= ~((u32)1 << 31);
3582 E1000_WRITE_REG(hw, E1000_STATUS, reg);
3587 * e1000_setup_link_ich8lan - Setup flow control and link settings
3588 * @hw: pointer to the HW structure
3590 * Determines which flow control settings to use, then configures flow
3591 * control. Calls the appropriate media-specific link configuration
3592 * function. Assuming the adapter has a valid link partner, a valid link
3593 * should be established. Assumes the hardware has previously been reset
3594 * and the transmitter and receiver are not enabled.
3596 static s32
3597 e1000_setup_link_ich8lan(struct e1000_hw *hw)
3599 s32 ret_val = E1000_SUCCESS;
3601 DEBUGFUNC("e1000_setup_link_ich8lan");
3603 if (hw->phy.ops.check_reset_block(hw))
3604 goto out;
3607 * ICH parts do not have a word in the NVM to determine the default
3608 * flow control setting, so we explicitly set it to full.
3610 if (hw->fc.requested_mode == e1000_fc_default)
3611 hw->fc.requested_mode = e1000_fc_full;
3614 * Save off the requested flow control mode for use later. Depending
3615 * on the link partner's capabilities, we may or may not use this mode.
3617 hw->fc.current_mode = hw->fc.requested_mode;
3618 DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
3619 hw->fc.current_mode);
3621 /* Continue to configure the copper link. */
3622 ret_val = hw->mac.ops.setup_physical_interface(hw);
3623 if (ret_val)
3624 goto out;
3626 E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
3627 if ((hw->phy.type == e1000_phy_82578) ||
3628 (hw->phy.type == e1000_phy_82579) ||
3629 (hw->phy.type == e1000_phy_82577)) {
3630 /* added from freebsd */
3631 E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
3633 ret_val = hw->phy.ops.write_reg(hw,
3634 PHY_REG(BM_PORT_CTRL_PAGE, 27),
3635 hw->fc.pause_time);
3636 if (ret_val)
3637 goto out;
3640 ret_val = e1000_set_fc_watermarks_generic(hw);
3642 out:
3643 return (ret_val);
3647 * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
3648 * @hw: pointer to the HW structure
3650 * Configures the kumeran interface to the PHY to wait the appropriate time
3651 * when polling the PHY, then call the generic setup_copper_link to finish
3652 * configuring the copper link.
3654 static s32
3655 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
3657 u32 ctrl;
3658 s32 ret_val;
3659 u16 reg_data;
3661 DEBUGFUNC("e1000_setup_copper_link_ich8lan");
3663 ctrl = E1000_READ_REG(hw, E1000_CTRL);
3664 ctrl |= E1000_CTRL_SLU;
3665 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
3666 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
3669 * Set the mac to wait the maximum time between each iteration and
3670 * increase the max iterations when polling the phy; this fixes
3671 * erroneous timeouts at 10Mbps.
3673 ret_val = e1000_write_kmrn_reg_generic(hw,
3674 E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF);
3675 if (ret_val)
3676 goto out;
3677 ret_val = e1000_read_kmrn_reg_generic(hw,
3678 E1000_KMRNCTRLSTA_INBAND_PARAM, &reg_data);
3679 if (ret_val)
3680 goto out;
3681 reg_data |= 0x3F;
3682 ret_val = e1000_write_kmrn_reg_generic(hw,
3683 E1000_KMRNCTRLSTA_INBAND_PARAM, reg_data);
3684 if (ret_val)
3685 goto out;
3687 switch (hw->phy.type) {
3688 case e1000_phy_igp_3:
3689 ret_val = e1000_copper_link_setup_igp(hw);
3690 if (ret_val)
3691 goto out;
3692 break;
3693 case e1000_phy_bm:
3694 case e1000_phy_82578:
3695 ret_val = e1000_copper_link_setup_m88(hw);
3696 if (ret_val)
3697 goto out;
3698 break;
3699 case e1000_phy_82577:
3700 ret_val = e1000_copper_link_setup_82577(hw);
3701 if (ret_val)
3702 goto out;
3703 break;
3704 case e1000_phy_ife:
3705 ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
3706 &reg_data);
3707 if (ret_val)
3708 goto out;
3710 reg_data &= ~IFE_PMC_AUTO_MDIX;
3712 switch (hw->phy.mdix) {
3713 case 1:
3714 reg_data &= ~IFE_PMC_FORCE_MDIX;
3715 break;
3716 case 2:
3717 reg_data |= IFE_PMC_FORCE_MDIX;
3718 break;
3719 case 0:
3720 default:
3721 reg_data |= IFE_PMC_AUTO_MDIX;
3722 break;
3724 ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
3725 reg_data);
3726 if (ret_val)
3727 goto out;
3728 break;
3729 default:
3730 break;
3732 ret_val = e1000_setup_copper_link_generic(hw);
3734 out:
3735 return (ret_val);
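/*
 * Illustrative sketch (not part of the driver): forcing MDI-X on an IFE PHY
 * before link setup would use the same hw->phy.mdix encoding consumed above
 * (0 = auto crossover, 1 = forced MDI, 2 = forced MDI-X).
 */
#if 0
	hw->phy.mdix = 2;	/* request forced MDI-X crossover */
	ret_val = e1000_setup_copper_link_ich8lan(hw);
#endif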
3739 * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
3740 * @hw: pointer to the HW structure
3741 * @speed: pointer to store current link speed
3742 * @duplex: pointer to store the current link duplex
3744 * Calls the generic get_speed_and_duplex to retrieve the current link
3745 * information and then calls the Kumeran lock loss workaround for links at
3746 * gigabit speeds.
3748 static s32
3749 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed, u16 *duplex)
3751 s32 ret_val;
3753 DEBUGFUNC("e1000_get_link_up_info_ich8lan");
3755 ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
3756 if (ret_val)
3757 goto out;
3759 if ((hw->mac.type == e1000_ich8lan) &&
3760 (hw->phy.type == e1000_phy_igp_3) &&
3761 (*speed == SPEED_1000)) {
3762 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
3765 out:
3766 return (ret_val);
3770 * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
3771 * @hw: pointer to the HW structure
3773 * Work-around for 82566 Kumeran PCS lock loss:
3774 * On link status change (i.e. PCI reset, speed change) and link is up and
3775 * speed is gigabit-
3776 * 0) if workaround is optionally disabled do nothing
3777 * 1) wait 1ms for Kumeran link to come up
3778 * 2) check Kumeran Diagnostic register PCS lock loss bit
3779 * 3) if not set the link is locked (all is good), otherwise...
3780 * 4) reset the PHY
3781 * 5) repeat up to 10 times
3782 * Note: this is only called for IGP3 copper when speed is 1gb.
3784 static s32
3785 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
3787 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3788 u32 phy_ctrl;
3789 s32 ret_val = E1000_SUCCESS;
3790 u16 i, data;
3791 bool link;
3793 DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
3795 if (!(dev_spec->kmrn_lock_loss_workaround_enabled))
3796 goto out;
3799 	 * Make sure link is up before proceeding.  If not, just return.
3800 	 * Attempting this while the link is negotiating has fouled up link stability.
3802 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
3803 if (!link) {
3804 ret_val = E1000_SUCCESS;
3805 goto out;
3808 for (i = 0; i < 10; i++) {
3809 /* read once to clear */
3810 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
3811 if (ret_val)
3812 goto out;
3813 /* and again to get new status */
3814 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
3815 if (ret_val)
3816 goto out;
3818 /* check for PCS lock */
3819 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) {
3820 ret_val = E1000_SUCCESS;
3821 goto out;
3824 /* Issue PHY reset */
3825 hw->phy.ops.reset(hw);
3826 msec_delay_irq(5);
3828 /* Disable GigE link negotiation */
3829 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3830 phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
3831 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3832 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3835 * Call gig speed drop workaround on Gig disable before accessing any
3836 * PHY registers
3838 e1000_gig_downshift_workaround_ich8lan(hw);
3840 /* unable to acquire PCS lock */
3841 ret_val = -E1000_ERR_PHY;
3843 out:
3844 return (ret_val);
3848 * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
3849 * @hw: pointer to the HW structure
3850 * @state: boolean value used to set the current Kumeran workaround state
3852 * If ICH8, set the current Kumeran workaround state (enabled - true
3853 * /disabled - false).
3855 void
3856 e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
3857 bool state)
3859 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3861 DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
3863 if (hw->mac.type != e1000_ich8lan) {
3864 DEBUGOUT("Workaround applies to ICH8 only.\n");
3865 return;
3868 dev_spec->kmrn_lock_loss_workaround_enabled = state;
3872  * e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
3873 * @hw: pointer to the HW structure
3875 * Workaround for 82566 power-down on D3 entry:
3876 * 1) disable gigabit link
3877 * 2) write VR power-down enable
3878 * 3) read it back
3879 * Continue if successful, else issue LCD reset and repeat
3881 void
3882 e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
3884 u32 reg;
3885 u16 data;
3886 u8 retry = 0;
3888 DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
3890 if (hw->phy.type != e1000_phy_igp_3)
3891 return;
3893 /* Try the workaround twice (if needed) */
3894 do {
3895 /* Disable link */
3896 reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
3897 reg |= (E1000_PHY_CTRL_GBE_DISABLE |
3898 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3899 E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
3902 * Call gig speed drop workaround on Gig disable before
3903 * accessing any PHY registers
3905 if (hw->mac.type == e1000_ich8lan)
3906 e1000_gig_downshift_workaround_ich8lan(hw);
3908 /* Write VR power-down enable */
3909 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
3910 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3911 hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
3912 data | IGP3_VR_CTRL_MODE_SHUTDOWN);
3914 /* Read it back and test */
3915 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
3916 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3917 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
3918 break;
3920 /* Issue PHY reset and repeat at most one more time */
3921 reg = E1000_READ_REG(hw, E1000_CTRL);
3922 E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
3923 retry++;
3924 } while (retry);
3928 * e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
3929 * @hw: pointer to the HW structure
3931  * Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
3932 * LPLU, Gig disable, MDIC PHY reset):
3933 * 1) Set Kumeran Near-end loopback
3934 * 2) Clear Kumeran Near-end loopback
3935 * Should only be called for ICH8[m] devices with IGP_3 Phy.
void
e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u16 reg_data;

	DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");

	if ((hw->mac.type != e1000_ich8lan) ||
	    (hw->phy.type != e1000_phy_igp_3))
		return;

	ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
	    &reg_data);
	if (ret_val)
		return;
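
	/*
	 * Toggle Kumeran near-end loopback: set the NELPBK bit and write it,
	 * then clear the bit and write again.  The brief set/clear sequence
	 * is the workaround itself.
	 */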
	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
	ret_val = e1000_write_kmrn_reg_generic(hw,
	    E1000_KMRNCTRLSTA_DIAG_OFFSET,
	    reg_data);
	if (ret_val)
		return;
	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
	ret_val = e1000_write_kmrn_reg_generic(hw,
	    E1000_KMRNCTRLSTA_DIAG_OFFSET,
	    reg_data);
}

/*
 * e1000_disable_gig_wol_ich8lan - Disable gigabit link during WoL
 * @hw: pointer to the HW structure
 *
 * During the S0 to Sx transition it is possible that the link remains at
 * gigabit speed instead of negotiating down.  Before going to Sx, set
 * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation
 * to a lower speed.
 *
 * Should only be called for applicable parts.
 */
void
e1000_disable_gig_wol_ich8lan(struct e1000_hw *hw)
{
	u32 phy_ctrl;

	switch (hw->mac.type) {
	case e1000_ich9lan:
	case e1000_ich10lan:
	case e1000_pchlan:
		phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU |
		    E1000_PHY_CTRL_GBE_DISABLE;
		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

		if (hw->mac.type == e1000_pchlan)
			(void) e1000_phy_hw_reset_ich8lan(hw);
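		/* FALLTHROUGH */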
	default:
		break;
	}
}

/*
 * e1000_cleanup_led_ich8lan - Restore the default LED operation
 * @hw: pointer to the HW structure
 *
 * Return the LED back to the default configuration.
 */
static s32
e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_cleanup_led_ich8lan");
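
	/*
	 * The IFE (10/100) PHY drives its LEDs through a PHY register, so the
	 * IFE branch below clears IFE_PHY_SPECIAL_CONTROL_LED (assumed: a
	 * value of 0 drops the probe-mode override set by led_on/led_off);
	 * all other parts simply restore the saved LEDCTL default.
	 */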
	if (hw->phy.type == e1000_phy_ife)
		ret_val = hw->phy.ops.write_reg(hw,
		    IFE_PHY_SPECIAL_CONTROL_LED,
		    0);
	else
		E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);

	return (ret_val);
}

/*
 * e1000_led_on_ich8lan - Turn LEDs on
 * @hw: pointer to the HW structure
 *
 * Turn on the LEDs.
 */
static s32
e1000_led_on_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_led_on_ich8lan");
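
	/*
	 * On the IFE PHY the LEDs are forced on by enabling probe mode with
	 * the "LEDs on" setting; on GbE PHYs the precomputed LEDCTL "on"
	 * value (ledctl_mode2) is written to the MAC instead.
	 */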
	if (hw->phy.type == e1000_phy_ife)
		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
		    (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
	else
		E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);

	return (ret_val);
}

/*
 * e1000_led_off_ich8lan - Turn LEDs off
 * @hw: pointer to the HW structure
 *
 * Turn off the LEDs.
 */
static s32
e1000_led_off_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_led_off_ich8lan");

	if (hw->phy.type == e1000_phy_ife)
		ret_val = hw->phy.ops.write_reg(hw,
		    IFE_PHY_SPECIAL_CONTROL_LED,
		    (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
	else
		E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);

	return (ret_val);
}

/*
 * e1000_setup_led_pchlan - Configures SW controllable LED
 * @hw: pointer to the HW structure
 *
 * This prepares the SW controllable LED for use.
 */
static s32
e1000_setup_led_pchlan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_setup_led_pchlan");
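
	/*
	 * On PCH parts the LEDs hang off the PHY, so the LED configuration is
	 * programmed through the HV_LED_CONFIG PHY register rather than the
	 * MAC's LEDCTL register used by the ICH8/9/10 routines above.
	 */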
	return (hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
	    (u16)hw->mac.ledctl_mode1));
}

/*
 * e1000_cleanup_led_pchlan - Restore the default LED operation
 * @hw: pointer to the HW structure
 *
 * Return the LED back to the default configuration.
 */
static s32
e1000_cleanup_led_pchlan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_cleanup_led_pchlan");

	return (hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
	    (u16)hw->mac.ledctl_default));
}

/*
 * e1000_led_on_pchlan - Turn LEDs on
 * @hw: pointer to the HW structure
 *
 * Turn on the LEDs.
 */
static s32
e1000_led_on_pchlan(struct e1000_hw *hw)
{
	u16 data = (u16)hw->mac.ledctl_mode2;
	u32 i, led;

	DEBUGFUNC("e1000_led_on_pchlan");
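
	/*
	 * HV_LED_CONFIG describes three LEDs in 5-bit groups: a mode field
	 * plus an invert bit (E1000_PHY_LED0_IVRT) per LED.  The loop below
	 * walks those groups.
	 */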

	/*
	 * If there is no link, turn the LED on by toggling the invert bit
	 * for each LED whose mode is "link_up" in ledctl_mode2.
	 */
	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		for (i = 0; i < 3; i++) {
			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
			if ((led & E1000_PHY_LED0_MODE_MASK) !=
			    E1000_LEDCTL_MODE_LINK_UP)
				continue;
			if (led & E1000_PHY_LED0_IVRT)
				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
			else
				data |= (E1000_PHY_LED0_IVRT << (i * 5));
		}
	}

	return (hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data));
}

/*
 * e1000_led_off_pchlan - Turn LEDs off
 * @hw: pointer to the HW structure
 *
 * Turn off the LEDs.
 */
static s32
e1000_led_off_pchlan(struct e1000_hw *hw)
{
	u16 data = (u16)hw->mac.ledctl_mode1;
	u32 i, led;

	DEBUGFUNC("e1000_led_off_pchlan");
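
	/*
	 * Same 5-bit-per-LED layout as e1000_led_on_pchlan, but starting
	 * from the "off" (ledctl_mode1) value.
	 */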

	/*
	 * If there is no link, turn the LED off by toggling the invert bit
	 * for each LED whose mode is "link_up" in ledctl_mode1.
	 */
	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		for (i = 0; i < 3; i++) {
			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
			if ((led & E1000_PHY_LED0_MODE_MASK) !=
			    E1000_LEDCTL_MODE_LINK_UP)
				continue;
			if (led & E1000_PHY_LED0_IVRT)
				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
			else
				data |= (E1000_PHY_LED0_IVRT << (i * 5));
		}
	}

	return (hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data));
}

/*
 * e1000_get_cfg_done_ich8lan - Read config done bit
 * @hw: pointer to the HW structure
 *
 * Read the management control register for the config done bit for
 * completion status.  NOTE: EEPROM-less silicon will fail trying to read
 * the config done bit, so the error is *ONLY* logged and E1000_SUCCESS is
 * returned.  If we returned an error instead, EEPROM-less silicon could
 * never be reset or change link.
 */
static s32
e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u32 bank = 0;
	u32 status;

	DEBUGFUNC("e1000_get_cfg_done_ich8lan");

	(void) e1000_get_cfg_done_generic(hw);
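
	/*
	 * The result of the generic poll is deliberately ignored; as noted
	 * above, EEPROM-less parts can fail it, and that must not be treated
	 * as a fatal error.
	 */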
	/* Wait for indication from h/w that it has completed basic config */
	if (hw->mac.type >= e1000_ich10lan) {
		e1000_lan_init_done_ich8lan(hw);
	} else {
		ret_val = e1000_get_auto_rd_done_generic(hw);
		if (ret_val) {
			/*
			 * When the auto config read does not complete, do not
			 * return with an error.  This can happen when there is
			 * no EEPROM, and returning an error would prevent the
			 * device from ever getting link.
			 */
			DEBUGOUT("Auto Read Done did not complete\n");
			ret_val = E1000_SUCCESS;
		}
	}

	/* Clear PHY Reset Asserted bit */
	status = E1000_READ_REG(hw, E1000_STATUS);
	if (status & E1000_STATUS_PHYRA) {
		E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
	} else {
		DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
	}

	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
	if (hw->mac.type <= e1000_ich9lan) {
		if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
		    (hw->phy.type == e1000_phy_igp_3)) {
			ret_val = e1000_phy_init_script_igp3(hw);
		}
	} else {
		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
			/* Maybe we should do a basic PHY config */
			DEBUGOUT("EEPROM not present\n");
			ret_val = -E1000_ERR_CONFIG;
		}
	}

	return (ret_val);
}

/*
 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
 * @hw: pointer to the HW structure
 *
 * In the case of a PHY power down to save power, to turn off link during a
 * driver unload, or when Wake on LAN is not enabled, remove the link.
 */
static void
e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
{
	/*
	 * Power down only if neither manageability mode nor a PHY reset
	 * block is active.
	 */
	if (!(hw->mac.ops.check_mng_mode(hw) ||
	    hw->phy.ops.check_reset_block(hw)))
		e1000_power_down_phy_copper(hw);
}

/*
 * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
 * @hw: pointer to the HW structure
 *
 * Clears hardware counters specific to the silicon family and calls
 * clear_hw_cntrs_generic to clear all general purpose counters.
 */
static void
e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
{
	u16 phy_data;

	DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");

	e1000_clear_hw_cntrs_base_generic(hw);
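
	/*
	 * The MAC statistics registers below are clear-on-read, so the
	 * discarded reads zero the family-specific counters.
	 */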
	(void) E1000_READ_REG(hw, E1000_ALGNERRC);
	(void) E1000_READ_REG(hw, E1000_RXERRC);
	(void) E1000_READ_REG(hw, E1000_TNCRS);
	(void) E1000_READ_REG(hw, E1000_CEXTERR);
	(void) E1000_READ_REG(hw, E1000_TSCTC);
	(void) E1000_READ_REG(hw, E1000_TSCTFC);

	(void) E1000_READ_REG(hw, E1000_MGTPRC);
	(void) E1000_READ_REG(hw, E1000_MGTPDC);
	(void) E1000_READ_REG(hw, E1000_MGTPTC);

	(void) E1000_READ_REG(hw, E1000_IAC);
	(void) E1000_READ_REG(hw, E1000_ICRXOC);

	/* Clear PHY statistics registers */
	if ((hw->phy.type == e1000_phy_82578) ||
	    (hw->phy.type == e1000_phy_82577)) {
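		/*
		 * The 82577/82578 (PCH) PHYs keep collision and CRS
		 * statistics in PHY registers; reading the UPPER/LOWER
		 * halves is likewise assumed to clear them.
		 */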
		(void) hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data);
		(void) hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data);
		(void) hw->phy.ops.read_reg(hw, HV_ECOL_UPPER, &phy_data);
		(void) hw->phy.ops.read_reg(hw, HV_ECOL_LOWER, &phy_data);
		(void) hw->phy.ops.read_reg(hw, HV_MCC_UPPER, &phy_data);
		(void) hw->phy.ops.read_reg(hw, HV_MCC_LOWER, &phy_data);
		(void) hw->phy.ops.read_reg(hw, HV_LATECOL_UPPER, &phy_data);
		(void) hw->phy.ops.read_reg(hw, HV_LATECOL_LOWER, &phy_data);
		(void) hw->phy.ops.read_reg(hw, HV_COLC_UPPER, &phy_data);
		(void) hw->phy.ops.read_reg(hw, HV_COLC_LOWER, &phy_data);
		(void) hw->phy.ops.read_reg(hw, HV_DC_UPPER, &phy_data);
		(void) hw->phy.ops.read_reg(hw, HV_DC_LOWER, &phy_data);
		(void) hw->phy.ops.read_reg(hw, HV_TNCRS_UPPER, &phy_data);
		(void) hw->phy.ops.read_reg(hw, HV_TNCRS_LOWER, &phy_data);