e1000 - Literally import e1000 driver from FreeBSD
[dragonfly.git] / sys / dev / netif / e1000 / e1000_ich8lan.c
blob c40085fcfd20db5b99a323101d6beab327d94362
1 /******************************************************************************
3 Copyright (c) 2001-2010, Intel Corporation
4 All rights reserved.
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
33 /*$FreeBSD$*/
35 /*
36 * 82562G 10/100 Network Connection
37 * 82562G-2 10/100 Network Connection
38 * 82562GT 10/100 Network Connection
39 * 82562GT-2 10/100 Network Connection
40 * 82562V 10/100 Network Connection
41 * 82562V-2 10/100 Network Connection
42 * 82566DC-2 Gigabit Network Connection
43 * 82566DC Gigabit Network Connection
44 * 82566DM-2 Gigabit Network Connection
45 * 82566DM Gigabit Network Connection
46 * 82566MC Gigabit Network Connection
47 * 82566MM Gigabit Network Connection
48 * 82567LM Gigabit Network Connection
49 * 82567LF Gigabit Network Connection
50 * 82567V Gigabit Network Connection
51 * 82567LM-2 Gigabit Network Connection
52 * 82567LF-2 Gigabit Network Connection
53 * 82567V-2 Gigabit Network Connection
54 * 82567LF-3 Gigabit Network Connection
55 * 82567LM-3 Gigabit Network Connection
56 * 82567LM-4 Gigabit Network Connection
57 * 82577LM Gigabit Network Connection
58 * 82577LC Gigabit Network Connection
59 * 82578DM Gigabit Network Connection
60 * 82578DC Gigabit Network Connection
61 */
63 #include "e1000_api.h"
65 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw);
66 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw);
67 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw);
68 static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw);
69 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
70 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
71 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
72 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
73 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
74 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
75 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
76 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
77 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
78 bool active);
79 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
80 bool active);
81 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
82 u16 words, u16 *data);
83 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
84 u16 words, u16 *data);
85 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
86 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
87 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
88 u16 *data);
89 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
90 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
91 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw);
92 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw);
93 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
94 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
95 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
96 u16 *speed, u16 *duplex);
97 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
98 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
99 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
100 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
101 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
102 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
103 static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
104 static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
105 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
106 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
107 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout);
108 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw);
109 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
110 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
111 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
112 u32 offset, u8 *data);
113 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
114 u8 size, u16 *data);
115 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
116 u32 offset, u16 *data);
117 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
118 u32 offset, u8 byte);
119 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw,
120 u32 offset, u8 data);
121 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
122 u8 size, u16 data);
123 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
124 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
125 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
126 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
127 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
128 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
130 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
131 /* Offset 04h HSFSTS */
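/*
 * Each of the unions below overlays a bit-field view of a flash register
 * on top of the raw register value (regval), so individual fields can be
 * read or modified and the register written back in a single access.
 */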
132 union ich8_hws_flash_status {
133 struct ich8_hsfsts {
134 u16 flcdone :1; /* bit 0 Flash Cycle Done */
135 u16 flcerr :1; /* bit 1 Flash Cycle Error */
136 u16 dael :1; /* bit 2 Direct Access error Log */
137 u16 berasesz :2; /* bit 4:3 Sector Erase Size */
138 u16 flcinprog :1; /* bit 5 flash cycle in Progress */
139 u16 reserved1 :2; /* bit 7:6 Reserved */
140 u16 reserved2 :6; /* bit 13:8 Reserved */
141 u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
142 u16 flockdn :1; /* bit 15 Flash Config Lock-Down */
143 } hsf_status;
144 u16 regval;
145 };
147 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
148 /* Offset 06h FLCTL */
149 union ich8_hws_flash_ctrl {
150 struct ich8_hsflctl {
151 u16 flcgo :1; /* 0 Flash Cycle Go */
152 u16 flcycle :2; /* 2:1 Flash Cycle */
153 u16 reserved :5; /* 7:3 Reserved */
154 u16 fldbcount :2; /* 9:8 Flash Data Byte Count */
155 u16 flockdn :6; /* 15:10 Reserved */
156 } hsf_ctrl;
157 u16 regval;
158 };
160 /* ICH Flash Region Access Permissions */
161 union ich8_hws_flash_regacc {
162 struct ich8_flracc {
163 u32 grra :8; /* 0:7 GbE region Read Access */
164 u32 grwa :8; /* 8:15 GbE region Write Access */
165 u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */
166 u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */
167 } hsf_flregacc;
168 u16 regval;
169 };
172 * e1000_init_phy_params_pchlan - Initialize PHY function pointers
173 * @hw: pointer to the HW structure
175 * Initialize family-specific PHY parameters and function pointers.
177 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
179 struct e1000_phy_info *phy = &hw->phy;
180 s32 ret_val = E1000_SUCCESS;
182 DEBUGFUNC("e1000_init_phy_params_pchlan");
184 phy->addr = 1;
185 phy->reset_delay_us = 100;
187 phy->ops.acquire = e1000_acquire_swflag_ich8lan;
188 phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
189 phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
190 phy->ops.read_reg = e1000_read_phy_reg_hv;
191 phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
192 phy->ops.release = e1000_release_swflag_ich8lan;
193 phy->ops.reset = e1000_phy_hw_reset_ich8lan;
194 phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
195 phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
196 phy->ops.write_reg = e1000_write_phy_reg_hv;
197 phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
198 phy->ops.power_up = e1000_power_up_phy_copper;
199 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
200 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
202 phy->id = e1000_phy_unknown;
203 ret_val = e1000_get_phy_id(hw);
204 if (ret_val)
205 goto out;
206 if ((phy->id == 0) || (phy->id == PHY_REVISION_MASK)) {
208 * In case the PHY needs to be in mdio slow mode (eg. 82577),
209 * set slow mode and try to get the PHY id again.
211 ret_val = e1000_set_mdio_slow_mode_hv(hw);
212 if (ret_val)
213 goto out;
214 ret_val = e1000_get_phy_id(hw);
215 if (ret_val)
216 goto out;
218 phy->type = e1000_get_phy_type_from_id(phy->id);
220 switch (phy->type) {
221 case e1000_phy_82577:
222 phy->ops.check_polarity = e1000_check_polarity_82577;
223 phy->ops.force_speed_duplex =
224 e1000_phy_force_speed_duplex_82577;
225 phy->ops.get_cable_length = e1000_get_cable_length_82577;
226 phy->ops.get_info = e1000_get_phy_info_82577;
227 phy->ops.commit = e1000_phy_sw_reset_generic;
break;
228 case e1000_phy_82578:
229 phy->ops.check_polarity = e1000_check_polarity_m88;
230 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
231 phy->ops.get_cable_length = e1000_get_cable_length_m88;
232 phy->ops.get_info = e1000_get_phy_info_m88;
233 break;
234 default:
235 ret_val = -E1000_ERR_PHY;
236 break;
239 out:
240 return ret_val;
244 * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
245 * @hw: pointer to the HW structure
247 * Initialize family-specific PHY parameters and function pointers.
249 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
251 struct e1000_phy_info *phy = &hw->phy;
252 s32 ret_val = E1000_SUCCESS;
253 u16 i = 0;
255 DEBUGFUNC("e1000_init_phy_params_ich8lan");
257 phy->addr = 1;
258 phy->reset_delay_us = 100;
260 phy->ops.acquire = e1000_acquire_swflag_ich8lan;
261 phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
262 phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
263 phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
264 phy->ops.read_reg = e1000_read_phy_reg_igp;
265 phy->ops.release = e1000_release_swflag_ich8lan;
266 phy->ops.reset = e1000_phy_hw_reset_ich8lan;
267 phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
268 phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
269 phy->ops.write_reg = e1000_write_phy_reg_igp;
270 phy->ops.power_up = e1000_power_up_phy_copper;
271 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
274 * We may need to do this twice - once for IGP and if that fails,
275 * we'll set BM func pointers and try again
277 ret_val = e1000_determine_phy_address(hw);
278 if (ret_val) {
279 phy->ops.write_reg = e1000_write_phy_reg_bm;
280 phy->ops.read_reg = e1000_read_phy_reg_bm;
281 ret_val = e1000_determine_phy_address(hw);
282 if (ret_val) {
283 DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
284 goto out;
288 phy->id = 0;
289 while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
290 (i++ < 100)) {
291 msec_delay(1);
292 ret_val = e1000_get_phy_id(hw);
293 if (ret_val)
294 goto out;
297 /* Verify phy id */
298 switch (phy->id) {
299 case IGP03E1000_E_PHY_ID:
300 phy->type = e1000_phy_igp_3;
301 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
302 phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
303 phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
304 phy->ops.get_info = e1000_get_phy_info_igp;
305 phy->ops.check_polarity = e1000_check_polarity_igp;
306 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
307 break;
308 case IFE_E_PHY_ID:
309 case IFE_PLUS_E_PHY_ID:
310 case IFE_C_E_PHY_ID:
311 phy->type = e1000_phy_ife;
312 phy->autoneg_mask = E1000_ALL_NOT_GIG;
313 phy->ops.get_info = e1000_get_phy_info_ife;
314 phy->ops.check_polarity = e1000_check_polarity_ife;
315 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
316 break;
317 case BME1000_E_PHY_ID:
318 phy->type = e1000_phy_bm;
319 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
320 phy->ops.read_reg = e1000_read_phy_reg_bm;
321 phy->ops.write_reg = e1000_write_phy_reg_bm;
322 phy->ops.commit = e1000_phy_sw_reset_generic;
323 phy->ops.get_info = e1000_get_phy_info_m88;
324 phy->ops.check_polarity = e1000_check_polarity_m88;
325 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
326 break;
327 default:
328 ret_val = -E1000_ERR_PHY;
329 goto out;
332 out:
333 return ret_val;
337 * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
338 * @hw: pointer to the HW structure
340 * Initialize family-specific NVM parameters and function
341 * pointers.
343 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
345 struct e1000_nvm_info *nvm = &hw->nvm;
346 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
347 u32 gfpreg, sector_base_addr, sector_end_addr;
348 s32 ret_val = E1000_SUCCESS;
349 u16 i;
351 DEBUGFUNC("e1000_init_nvm_params_ich8lan");
353 /* Can't read flash registers if the register set isn't mapped. */
354 if (!hw->flash_address) {
355 DEBUGOUT("ERROR: Flash registers not mapped\n");
356 ret_val = -E1000_ERR_CONFIG;
357 goto out;
360 nvm->type = e1000_nvm_flash_sw;
362 gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
365 * sector_X_addr is a "sector"-aligned address (4096 bytes)
366 * Add 1 to sector_end_addr since this sector is included in
367 * the overall size.
369 sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
370 sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
372 /* flash_base_addr is byte-aligned */
373 nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
376 * find total size of the NVM, then cut in half since the total
377 * size represents two separate NVM banks.
379 nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
380 << FLASH_SECTOR_ADDR_SHIFT;
381 nvm->flash_bank_size /= 2;
382 /* Adjust to word count */
383 nvm->flash_bank_size /= sizeof(u16);
385 nvm->word_size = E1000_SHADOW_RAM_WORDS;
387 /* Clear shadow ram */
388 for (i = 0; i < nvm->word_size; i++) {
389 dev_spec->shadow_ram[i].modified = FALSE;
390 dev_spec->shadow_ram[i].value = 0xFFFF;
393 E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
394 E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
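/*
 * Of the two mutexes initialized above, nvm_mutex serializes NVM accesses
 * (e1000_acquire/release_nvm_ich8lan) and swflag_mutex guards the
 * EXTCNF_CTRL software flag used for PHY and select MAC CSR accesses
 * (e1000_acquire/release_swflag_ich8lan).
 */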
396 /* Function Pointers */
397 nvm->ops.acquire = e1000_acquire_nvm_ich8lan;
398 nvm->ops.release = e1000_release_nvm_ich8lan;
399 nvm->ops.read = e1000_read_nvm_ich8lan;
400 nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
401 nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
402 nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan;
403 nvm->ops.write = e1000_write_nvm_ich8lan;
405 out:
406 return ret_val;
410 * e1000_init_mac_params_ich8lan - Initialize MAC function pointers
411 * @hw: pointer to the HW structure
413 * Initialize family-specific MAC parameters and function
414 * pointers.
416 static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
418 struct e1000_mac_info *mac = &hw->mac;
419 u16 pci_cfg;
421 DEBUGFUNC("e1000_init_mac_params_ich8lan");
423 /* Set media type function pointer */
424 hw->phy.media_type = e1000_media_type_copper;
426 /* Set mta register count */
427 mac->mta_reg_count = 32;
428 /* Set rar entry count */
429 mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
430 if (mac->type == e1000_ich8lan)
431 mac->rar_entry_count--;
432 /* Set if part includes ASF firmware */
433 mac->asf_firmware_present = TRUE;
434 /* Set if manageability features are enabled. */
435 mac->arc_subsystem_valid = TRUE;
436 /* Adaptive IFS supported */
437 mac->adaptive_ifs = TRUE;
439 /* Function pointers */
441 /* bus type/speed/width */
442 mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
443 /* function id */
444 mac->ops.set_lan_id = e1000_set_lan_id_single_port;
445 /* reset */
446 mac->ops.reset_hw = e1000_reset_hw_ich8lan;
447 /* hw initialization */
448 mac->ops.init_hw = e1000_init_hw_ich8lan;
449 /* link setup */
450 mac->ops.setup_link = e1000_setup_link_ich8lan;
451 /* physical interface setup */
452 mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
453 /* check for link */
454 mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
455 /* check management mode */
456 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
457 /* link info */
458 mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
459 /* multicast address update */
460 mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
461 /* clear hardware counters */
462 mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
464 /* LED operations */
465 switch (mac->type) {
466 case e1000_ich8lan:
467 case e1000_ich9lan:
468 case e1000_ich10lan:
469 /* ID LED init */
470 mac->ops.id_led_init = e1000_id_led_init_generic;
471 /* blink LED */
472 mac->ops.blink_led = e1000_blink_led_generic;
473 /* setup LED */
474 mac->ops.setup_led = e1000_setup_led_generic;
475 /* cleanup LED */
476 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
477 /* turn on/off LED */
478 mac->ops.led_on = e1000_led_on_ich8lan;
479 mac->ops.led_off = e1000_led_off_ich8lan;
480 break;
481 case e1000_pchlan:
482 /* save PCH revision_id */
483 e1000_read_pci_cfg(hw, 0x2, &pci_cfg);
484 hw->revision_id = (u8)(pci_cfg &= 0x000F);
485 /* ID LED init */
486 mac->ops.id_led_init = e1000_id_led_init_pchlan;
487 /* setup LED */
488 mac->ops.setup_led = e1000_setup_led_pchlan;
489 /* cleanup LED */
490 mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
491 /* turn on/off LED */
492 mac->ops.led_on = e1000_led_on_pchlan;
493 mac->ops.led_off = e1000_led_off_pchlan;
494 break;
495 default:
496 break;
499 /* Enable PCS Lock-loss workaround for ICH8 */
500 if (mac->type == e1000_ich8lan)
501 e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);
503 return E1000_SUCCESS;
507 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
508 * @hw: pointer to the HW structure
510 * Checks to see if the link status of the hardware has changed. If a
511 * change in link status has been detected, then we read the PHY registers
512 * to get the current speed/duplex if link exists.
514 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
516 struct e1000_mac_info *mac = &hw->mac;
517 s32 ret_val;
518 bool link;
520 DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
523 * We only want to go out to the PHY registers to see if Auto-Neg
524 * has completed and/or if our link status has changed. The
525 * get_link_status flag is set upon receiving a Link Status
526 * Change or Rx Sequence Error interrupt.
528 if (!mac->get_link_status) {
529 ret_val = E1000_SUCCESS;
530 goto out;
534 * First we want to see if the MII Status Register reports
535 * link. If so, then we want to get the current speed/duplex
536 * of the PHY.
538 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
539 if (ret_val)
540 goto out;
542 if (hw->mac.type == e1000_pchlan) {
543 ret_val = e1000_k1_gig_workaround_hv(hw, link);
544 if (ret_val)
545 goto out;
548 if (!link)
549 goto out; /* No link detected */
551 mac->get_link_status = FALSE;
553 if (hw->phy.type == e1000_phy_82578) {
554 ret_val = e1000_link_stall_workaround_hv(hw);
555 if (ret_val)
556 goto out;
560 * Check if there was DownShift, must be checked
561 * immediately after link-up
563 e1000_check_downshift_generic(hw);
566 * If we are forcing speed/duplex, then we simply return since
567 * we have already determined whether we have link or not.
569 if (!mac->autoneg) {
570 ret_val = -E1000_ERR_CONFIG;
571 goto out;
575 * Auto-Neg is enabled. Auto Speed Detection takes care
576 * of MAC speed/duplex configuration. So we only need to
577 * configure Collision Distance in the MAC.
579 e1000_config_collision_dist_generic(hw);
582 * Configure Flow Control now that Auto-Neg has completed.
583 * First, we need to restore the desired flow control
584 * settings because we may have had to re-autoneg with a
585 * different link partner.
587 ret_val = e1000_config_fc_after_link_up_generic(hw);
588 if (ret_val)
589 DEBUGOUT("Error configuring flow control\n");
591 out:
592 return ret_val;
596 * e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
597 * @hw: pointer to the HW structure
599 * Initialize family-specific function pointers for PHY, MAC, and NVM.
601 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
603 DEBUGFUNC("e1000_init_function_pointers_ich8lan");
605 hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
606 hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
607 switch (hw->mac.type) {
608 case e1000_ich8lan:
609 case e1000_ich9lan:
610 case e1000_ich10lan:
611 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
612 break;
613 case e1000_pchlan:
614 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
615 break;
616 default:
617 break;
622 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
623 * @hw: pointer to the HW structure
625 * Acquires the mutex for performing NVM operations.
627 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
629 DEBUGFUNC("e1000_acquire_nvm_ich8lan");
631 E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
633 return E1000_SUCCESS;
637 * e1000_release_nvm_ich8lan - Release NVM mutex
638 * @hw: pointer to the HW structure
640 * Releases the mutex used while performing NVM operations.
642 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
644 DEBUGFUNC("e1000_release_nvm_ich8lan");
646 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
648 return;
652 * e1000_acquire_swflag_ich8lan - Acquire software control flag
653 * @hw: pointer to the HW structure
655 * Acquires the software control flag for performing PHY and select
656 * MAC CSR accesses.
658 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
660 u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
661 s32 ret_val = E1000_SUCCESS;
663 DEBUGFUNC("e1000_acquire_swflag_ich8lan");
665 E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
667 while (timeout) {
668 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
669 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
670 break;
672 msec_delay_irq(1);
673 timeout--;
676 if (!timeout) {
677 DEBUGOUT("SW/FW/HW has locked the resource for too long.\n");
678 ret_val = -E1000_ERR_CONFIG;
679 goto out;
682 timeout = SW_FLAG_TIMEOUT;
684 extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
685 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
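/*
 * Read the flag back until it sticks; ownership is only assumed once
 * hardware reports SWFLAG as set.
 */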
687 while (timeout) {
688 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
689 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
690 break;
692 msec_delay_irq(1);
693 timeout--;
696 if (!timeout) {
697 DEBUGOUT("Failed to acquire the semaphore.\n");
698 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
699 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
700 ret_val = -E1000_ERR_CONFIG;
701 goto out;
704 out:
705 if (ret_val)
706 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
708 return ret_val;
712 * e1000_release_swflag_ich8lan - Release software control flag
713 * @hw: pointer to the HW structure
715 * Releases the software control flag for performing PHY and select
716 * MAC CSR accesses.
718 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
720 u32 extcnf_ctrl;
722 DEBUGFUNC("e1000_release_swflag_ich8lan");
724 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
725 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
726 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
728 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
730 return;
734 * e1000_check_mng_mode_ich8lan - Checks management mode
735 * @hw: pointer to the HW structure
737 * This checks if the adapter has manageability enabled.
738 * This is a function pointer entry point only called by read/write
739 * routines for the PHY and NVM parts.
741 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
743 u32 fwsm;
745 DEBUGFUNC("e1000_check_mng_mode_ich8lan");
747 fwsm = E1000_READ_REG(hw, E1000_FWSM);
749 return (fwsm & E1000_FWSM_MODE_MASK) ==
750 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
754 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
755 * @hw: pointer to the HW structure
757 * Checks if firmware is blocking the reset of the PHY.
758 * This is a function pointer entry point only called by
759 * reset routines.
761 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
763 u32 fwsm;
765 DEBUGFUNC("e1000_check_reset_block_ich8lan");
767 fwsm = E1000_READ_REG(hw, E1000_FWSM);
769 return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? E1000_SUCCESS
770 : E1000_BLK_PHY_RESET;
774 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
775 * @hw: pointer to the HW structure
777 * SW should configure the LCD from the NVM extended configuration region
778 * as a workaround for certain parts.
780 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
782 struct e1000_phy_info *phy = &hw->phy;
783 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
784 s32 ret_val = E1000_SUCCESS;
785 u16 word_addr, reg_data, reg_addr, phy_page = 0;
787 if (!(hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) &&
788 !(hw->mac.type == e1000_pchlan))
789 return ret_val;
791 ret_val = hw->phy.ops.acquire(hw);
792 if (ret_val)
793 return ret_val;
796 * Initialize the PHY from the NVM on ICH platforms. This
797 * is needed due to an issue where the NVM configuration is
798 * not properly autoloaded after power transitions.
799 * Therefore, after each PHY reset, we will load the
800 * configuration data out of the NVM manually.
802 if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
803 (hw->device_id == E1000_DEV_ID_ICH8_IGP_M) ||
804 (hw->mac.type == e1000_pchlan))
805 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
806 else
807 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
809 data = E1000_READ_REG(hw, E1000_FEXTNVM);
810 if (!(data & sw_cfg_mask))
811 goto out;
813 /* Wait for basic configuration to complete before proceeding */
814 e1000_lan_init_done_ich8lan(hw);
817 * Make sure HW does not configure LCD from PHY
818 * extended configuration before SW configuration
820 data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
821 if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
822 goto out;
824 cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
825 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
826 cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
827 if (!cnf_size)
828 goto out;
830 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
831 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
833 if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
834 (hw->mac.type == e1000_pchlan)) {
836 * HW configures the SMBus address and LEDs when the
837 * OEM and LCD Write Enable bits are set in the NVM.
838 * When both NVM bits are cleared, SW will configure
839 * them instead.
841 data = E1000_READ_REG(hw, E1000_STRAP);
842 data &= E1000_STRAP_SMBUS_ADDRESS_MASK;
843 reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT;
844 reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
845 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR,
846 reg_data);
847 if (ret_val)
848 goto out;
850 data = E1000_READ_REG(hw, E1000_LEDCTL);
851 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
852 (u16)data);
853 if (ret_val)
854 goto out;
857 /* Configure LCD from extended configuration region. */
859 /* cnf_base_addr is in DWORD */
860 word_addr = (u16)(cnf_base_addr << 1);
862 for (i = 0; i < cnf_size; i++) {
863 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
864 &reg_data);
865 if (ret_val)
866 goto out;
868 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
869 1, &reg_addr);
870 if (ret_val)
871 goto out;
873 /* Save off the PHY page for future writes. */
874 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
875 phy_page = reg_data;
876 continue;
879 reg_addr &= PHY_REG_MASK;
880 reg_addr |= phy_page;
882 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
883 reg_data);
884 if (ret_val)
885 goto out;
888 out:
889 hw->phy.ops.release(hw);
890 return ret_val;
894 * e1000_k1_gig_workaround_hv - K1 Si workaround
895 * @hw: pointer to the HW structure
896 * @link: link up bool flag
898 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
899 * from a lower speed. This workaround disables K1 whenever link is at 1Gig.
900 * If link is down, the function will restore the default K1 setting located
901 * in the NVM.
903 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
905 s32 ret_val = E1000_SUCCESS;
906 u16 status_reg = 0;
907 bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
909 DEBUGFUNC("e1000_k1_gig_workaround_hv");
911 if (hw->mac.type != e1000_pchlan)
912 goto out;
914 /* Wrap the whole flow with the sw flag */
915 ret_val = hw->phy.ops.acquire(hw);
916 if (ret_val)
917 goto out;
919 /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
920 if (link) {
921 if (hw->phy.type == e1000_phy_82578) {
922 ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
923 &status_reg);
924 if (ret_val)
925 goto release;
927 status_reg &= BM_CS_STATUS_LINK_UP |
928 BM_CS_STATUS_RESOLVED |
929 BM_CS_STATUS_SPEED_MASK;
931 if (status_reg == (BM_CS_STATUS_LINK_UP |
932 BM_CS_STATUS_RESOLVED |
933 BM_CS_STATUS_SPEED_1000))
934 k1_enable = FALSE;
937 if (hw->phy.type == e1000_phy_82577) {
938 ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
939 &status_reg);
940 if (ret_val)
941 goto release;
943 status_reg &= HV_M_STATUS_LINK_UP |
944 HV_M_STATUS_AUTONEG_COMPLETE |
945 HV_M_STATUS_SPEED_MASK;
947 if (status_reg == (HV_M_STATUS_LINK_UP |
948 HV_M_STATUS_AUTONEG_COMPLETE |
949 HV_M_STATUS_SPEED_1000))
950 k1_enable = FALSE;
953 /* Link stall fix for link up */
954 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
955 0x0100);
956 if (ret_val)
957 goto release;
959 } else {
960 /* Link stall fix for link down */
961 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
962 0x4100);
963 if (ret_val)
964 goto release;
967 ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
969 release:
970 hw->phy.ops.release(hw);
971 out:
972 return ret_val;
976 * e1000_configure_k1_ich8lan - Configure K1 power state
977 * @hw: pointer to the HW structure
978 * @k1_enable: K1 state to configure
980 * Configure the K1 power state based on the provided parameter.
981 * Assumes semaphore already acquired.
983 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
985 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
987 s32 ret_val = E1000_SUCCESS;
988 u32 ctrl_reg = 0;
989 u32 ctrl_ext = 0;
990 u32 reg = 0;
991 u16 kmrn_reg = 0;
993 ret_val = e1000_read_kmrn_reg_locked(hw,
994 E1000_KMRNCTRLSTA_K1_CONFIG,
995 &kmrn_reg);
996 if (ret_val)
997 goto out;
999 if (k1_enable)
1000 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
1001 else
1002 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
1004 ret_val = e1000_write_kmrn_reg_locked(hw,
1005 E1000_KMRNCTRLSTA_K1_CONFIG,
1006 kmrn_reg);
1007 if (ret_val)
1008 goto out;
1010 usec_delay(20);
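/*
 * Briefly force the MAC speed (FRCSPD) with speed bypass (SPD_BYPS) set,
 * using saved copies of CTRL and CTRL_EXT, then restore the original
 * register values.
 */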
1011 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1012 ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
1014 reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
1015 reg |= E1000_CTRL_FRCSPD;
1016 E1000_WRITE_REG(hw, E1000_CTRL, reg);
1018 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
1019 usec_delay(20);
1020 E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
1021 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
1022 usec_delay(20);
1024 out:
1025 return ret_val;
1029 * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
1030 * @hw: pointer to the HW structure
1031 * @d0_state: boolean if entering d0 or d3 device state
1033 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
1034 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
1035 * in NVM determines whether HW should configure LPLU and Gbe Disable.
1037 s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1039 s32 ret_val = 0;
1040 u32 mac_reg;
1041 u16 oem_reg;
1043 if (hw->mac.type != e1000_pchlan)
1044 return ret_val;
1046 ret_val = hw->phy.ops.acquire(hw);
1047 if (ret_val)
1048 return ret_val;
1050 mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1051 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
1052 goto out;
1054 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
1055 if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
1056 goto out;
1058 mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
1060 ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
1061 if (ret_val)
1062 goto out;
1064 oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
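/*
 * Set the PHY OEM bits (GbE Disable, LPLU) to mirror the corresponding
 * MAC PHY_CTRL settings for the target power state (D0 vs. non-D0).
 */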
1066 if (d0_state) {
1067 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
1068 oem_reg |= HV_OEM_BITS_GBE_DIS;
1070 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
1071 oem_reg |= HV_OEM_BITS_LPLU;
1072 } else {
1073 if (mac_reg & E1000_PHY_CTRL_NOND0A_GBE_DISABLE)
1074 oem_reg |= HV_OEM_BITS_GBE_DIS;
1076 if (mac_reg & E1000_PHY_CTRL_NOND0A_LPLU)
1077 oem_reg |= HV_OEM_BITS_LPLU;
1079 /* Restart auto-neg to activate the bits */
1080 if (!hw->phy.ops.check_reset_block(hw))
1081 oem_reg |= HV_OEM_BITS_RESTART_AN;
1082 ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
1084 out:
1085 hw->phy.ops.release(hw);
1087 return ret_val;
1092 * e1000_hv_phy_powerdown_workaround_ich8lan - Power down workaround on Sx
1093 * @hw: pointer to the HW structure
1095 s32 e1000_hv_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
1097 if ((hw->phy.type != e1000_phy_82577) || (hw->revision_id > 2))
1098 return E1000_SUCCESS;
1100 return hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0444);
1104 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
1105 * @hw: pointer to the HW structure
1107 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
1109 s32 ret_val;
1110 u16 data;
1112 ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
1113 if (ret_val)
1114 return ret_val;
1116 data |= HV_KMRN_MDIO_SLOW;
1118 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
1120 return ret_val;
1124 * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
1125 * done after every PHY reset.
1127 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1129 s32 ret_val = E1000_SUCCESS;
1130 u16 phy_data;
1132 if (hw->mac.type != e1000_pchlan)
1133 goto out;
1135 /* Set MDIO slow mode before any other MDIO access */
1136 if (hw->phy.type == e1000_phy_82577) {
1137 ret_val = e1000_set_mdio_slow_mode_hv(hw);
1138 if (ret_val)
1139 goto out;
1142 /* Hanksville M Phy init for IEEE. */
1143 if ((hw->revision_id == 2) &&
1144 (hw->phy.type == e1000_phy_82577) &&
1145 ((hw->phy.revision == 2) || (hw->phy.revision == 3))) {
1146 hw->phy.ops.write_reg(hw, 0x10, 0x8823);
1147 hw->phy.ops.write_reg(hw, 0x11, 0x0018);
1148 hw->phy.ops.write_reg(hw, 0x10, 0x8824);
1149 hw->phy.ops.write_reg(hw, 0x11, 0x0016);
1150 hw->phy.ops.write_reg(hw, 0x10, 0x8825);
1151 hw->phy.ops.write_reg(hw, 0x11, 0x001A);
1152 hw->phy.ops.write_reg(hw, 0x10, 0x888C);
1153 hw->phy.ops.write_reg(hw, 0x11, 0x0007);
1154 hw->phy.ops.write_reg(hw, 0x10, 0x888D);
1155 hw->phy.ops.write_reg(hw, 0x11, 0x0007);
1156 hw->phy.ops.write_reg(hw, 0x10, 0x888E);
1157 hw->phy.ops.write_reg(hw, 0x11, 0x0007);
1158 hw->phy.ops.write_reg(hw, 0x10, 0x8827);
1159 hw->phy.ops.write_reg(hw, 0x11, 0x0001);
1160 hw->phy.ops.write_reg(hw, 0x10, 0x8835);
1161 hw->phy.ops.write_reg(hw, 0x11, 0x0001);
1162 hw->phy.ops.write_reg(hw, 0x10, 0x8834);
1163 hw->phy.ops.write_reg(hw, 0x11, 0x0001);
1164 hw->phy.ops.write_reg(hw, 0x10, 0x8833);
1165 hw->phy.ops.write_reg(hw, 0x11, 0x0002);
1168 if (((hw->phy.type == e1000_phy_82577) &&
1169 ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
1170 ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
1171 /* Disable generation of early preamble */
1172 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
1173 if (ret_val)
1174 goto out;
1176 /* Preamble tuning for SSC */
1177 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(770, 16), 0xA204);
1178 if (ret_val)
1179 goto out;
1182 if (hw->phy.type == e1000_phy_82578) {
1183 if (hw->revision_id < 3) {
1184 /* PHY config */
1185 ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x29,
1186 0x66C0);
1187 if (ret_val)
1188 goto out;
1190 /* PHY config */
1191 ret_val = hw->phy.ops.write_reg(hw, (1 << 6) | 0x1E,
1192 0xFFFF);
1193 if (ret_val)
1194 goto out;
1198 * Return registers to default by doing a soft reset then
1199 * writing 0x3140 to the control register.
1201 if (hw->phy.revision < 2) {
1202 e1000_phy_sw_reset_generic(hw);
1203 ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
1204 0x3140);
1208 if ((hw->revision_id == 2) &&
1209 (hw->phy.type == e1000_phy_82577) &&
1210 ((hw->phy.revision == 2) || (hw->phy.revision == 3))) {
1212 * Workaround for OEM (GbE) not operating after reset -
1213 * restart AN (twice)
1215 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0400);
1216 if (ret_val)
1217 goto out;
1218 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0400);
1219 if (ret_val)
1220 goto out;
1223 /* Select page 0 */
1224 ret_val = hw->phy.ops.acquire(hw);
1225 if (ret_val)
1226 goto out;
1228 hw->phy.addr = 1;
1229 ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
1230 hw->phy.ops.release(hw);
1231 if (ret_val)
1232 goto out;
1235 * Configure the K1 Si workaround during phy reset assuming there is
1236 * link so that it disables K1 if link is in 1Gbps.
1238 ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
1239 if (ret_val)
1240 goto out;
1242 /* Workaround for link disconnects on a busy hub in half duplex */
1243 ret_val = hw->phy.ops.acquire(hw);
1244 if (ret_val)
1245 goto out;
1246 ret_val = hw->phy.ops.read_reg_locked(hw,
1247 PHY_REG(BM_PORT_CTRL_PAGE, 17),
1248 &phy_data);
1249 if (ret_val)
1250 goto release;
1251 ret_val = hw->phy.ops.write_reg_locked(hw,
1252 PHY_REG(BM_PORT_CTRL_PAGE, 17),
1253 phy_data & 0x00FF);
1254 release:
1255 hw->phy.ops.release(hw);
1256 out:
1257 return ret_val;
1261 * e1000_lan_init_done_ich8lan - Check for PHY config completion
1262 * @hw: pointer to the HW structure
1264 * Check the appropriate indication the MAC has finished configuring the
1265 * PHY after a software reset.
1267 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
1269 u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
1271 DEBUGFUNC("e1000_lan_init_done_ich8lan");
1273 /* Wait for basic configuration to complete before proceeding */
1274 do {
1275 data = E1000_READ_REG(hw, E1000_STATUS);
1276 data &= E1000_STATUS_LAN_INIT_DONE;
1277 usec_delay(100);
1278 } while ((!data) && --loop);
1281 * If basic configuration is incomplete before the above loop
1282 * count reaches 0, loading the configuration from NVM will
1283 * leave the PHY in a bad state possibly resulting in no link.
1285 if (loop == 0)
1286 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
1288 /* Clear the Init Done bit for the next init event */
1289 data = E1000_READ_REG(hw, E1000_STATUS);
1290 data &= ~E1000_STATUS_LAN_INIT_DONE;
1291 E1000_WRITE_REG(hw, E1000_STATUS, data);
1295 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
1296 * @hw: pointer to the HW structure
1298 * Resets the PHY
1299 * This is a function pointer entry point called by drivers
1300 * or other shared routines.
1302 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
1304 s32 ret_val = E1000_SUCCESS;
1305 u16 reg;
1307 DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
1309 ret_val = e1000_phy_hw_reset_generic(hw);
1310 if (ret_val)
1311 goto out;
1313 /* Allow time for h/w to get to a quiescent state after reset */
1314 msec_delay(10);
1316 /* Perform any necessary post-reset workarounds */
1317 switch (hw->mac.type) {
1318 case e1000_pchlan:
1319 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
1320 if (ret_val)
1321 goto out;
1322 break;
1323 default:
1324 break;
1327 /* Dummy read to clear the phy wakeup bit after lcd reset */
1328 if (hw->mac.type == e1000_pchlan)
1329 hw->phy.ops.read_reg(hw, BM_WUC, &reg);
1331 /* Configure the LCD with the extended configuration region in NVM */
1332 ret_val = e1000_sw_lcd_config_ich8lan(hw);
1333 if (ret_val)
1334 goto out;
1336 /* Configure the LCD with the OEM bits in NVM */
1337 ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
1339 out:
1340 return ret_val;
1344 * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
1345 * @hw: pointer to the HW structure
1346 * @active: TRUE to enable LPLU, FALSE to disable
1348 * Sets the LPLU state according to the active flag. For PCH, if OEM write
1349 * bit is disabled in the NVM, writing the LPLU bits in the MAC will not set
1350 * the phy speed. This function will manually set the LPLU bit and restart
1351 * auto-neg as hw would do. D3 and D0 LPLU will call the same function
1352 * since it configures the same bit.
1354 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
1356 s32 ret_val = E1000_SUCCESS;
1357 u16 oem_reg;
1359 DEBUGFUNC("e1000_set_lplu_state_pchlan");
1361 ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
1362 if (ret_val)
1363 goto out;
1365 if (active)
1366 oem_reg |= HV_OEM_BITS_LPLU;
1367 else
1368 oem_reg &= ~HV_OEM_BITS_LPLU;
1370 oem_reg |= HV_OEM_BITS_RESTART_AN;
1371 ret_val = hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
1373 out:
1374 return ret_val;
1378 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
1379 * @hw: pointer to the HW structure
1380 * @active: TRUE to enable LPLU, FALSE to disable
1382 * Sets the LPLU D0 state according to the active flag. When
1383 * activating LPLU this function also disables smart speed
1384 * and vice versa. LPLU will not be activated unless the
1385 * device autonegotiation advertisement meets standards of
1386 * either 10 or 10/100 or 10/100/1000 at all duplexes.
1387 * This is a function pointer entry point only called by
1388 * PHY setup routines.
1390 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
1392 struct e1000_phy_info *phy = &hw->phy;
1393 u32 phy_ctrl;
1394 s32 ret_val = E1000_SUCCESS;
1395 u16 data;
1397 DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
1399 if (phy->type == e1000_phy_ife)
1400 goto out;
1402 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
1404 if (active) {
1405 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
1406 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
1408 if (phy->type != e1000_phy_igp_3)
1409 goto out;
1412 * Call gig speed drop workaround on LPLU before accessing
1413 * any PHY registers
1415 if (hw->mac.type == e1000_ich8lan)
1416 e1000_gig_downshift_workaround_ich8lan(hw);
1418 /* When LPLU is enabled, we should disable SmartSpeed */
1419 ret_val = phy->ops.read_reg(hw,
1420 IGP01E1000_PHY_PORT_CONFIG,
1421 &data);
1422 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1423 ret_val = phy->ops.write_reg(hw,
1424 IGP01E1000_PHY_PORT_CONFIG,
1425 data);
1426 if (ret_val)
1427 goto out;
1428 } else {
1429 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
1430 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
1432 if (phy->type != e1000_phy_igp_3)
1433 goto out;
1436 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
1437 * during Dx states where the power conservation is most
1438 * important. During driver activity we should enable
1439 * SmartSpeed, so performance is maintained.
1441 if (phy->smart_speed == e1000_smart_speed_on) {
1442 ret_val = phy->ops.read_reg(hw,
1443 IGP01E1000_PHY_PORT_CONFIG,
1444 &data);
1445 if (ret_val)
1446 goto out;
1448 data |= IGP01E1000_PSCFR_SMART_SPEED;
1449 ret_val = phy->ops.write_reg(hw,
1450 IGP01E1000_PHY_PORT_CONFIG,
1451 data);
1452 if (ret_val)
1453 goto out;
1454 } else if (phy->smart_speed == e1000_smart_speed_off) {
1455 ret_val = phy->ops.read_reg(hw,
1456 IGP01E1000_PHY_PORT_CONFIG,
1457 &data);
1458 if (ret_val)
1459 goto out;
1461 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1462 ret_val = phy->ops.write_reg(hw,
1463 IGP01E1000_PHY_PORT_CONFIG,
1464 data);
1465 if (ret_val)
1466 goto out;
1470 out:
1471 return ret_val;
1475 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
1476 * @hw: pointer to the HW structure
1477 * @active: TRUE to enable LPLU, FALSE to disable
1479 * Sets the LPLU D3 state according to the active flag. When
1480 * activating LPLU this function also disables smart speed
1481 * and vice versa. LPLU will not be activated unless the
1482 * device autonegotiation advertisement meets standards of
1483 * either 10 or 10/100 or 10/100/1000 at all duplexes.
1484 * This is a function pointer entry point only called by
1485 * PHY setup routines.
1487 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
1489 struct e1000_phy_info *phy = &hw->phy;
1490 u32 phy_ctrl;
1491 s32 ret_val = E1000_SUCCESS;
1492 u16 data;
1494 DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
1496 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
1498 if (!active) {
1499 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
1500 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
1502 if (phy->type != e1000_phy_igp_3)
1503 goto out;
1506 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
1507 * during Dx states where the power conservation is most
1508 * important. During driver activity we should enable
1509 * SmartSpeed, so performance is maintained.
1511 if (phy->smart_speed == e1000_smart_speed_on) {
1512 ret_val = phy->ops.read_reg(hw,
1513 IGP01E1000_PHY_PORT_CONFIG,
1514 &data);
1515 if (ret_val)
1516 goto out;
1518 data |= IGP01E1000_PSCFR_SMART_SPEED;
1519 ret_val = phy->ops.write_reg(hw,
1520 IGP01E1000_PHY_PORT_CONFIG,
1521 data);
1522 if (ret_val)
1523 goto out;
1524 } else if (phy->smart_speed == e1000_smart_speed_off) {
1525 ret_val = phy->ops.read_reg(hw,
1526 IGP01E1000_PHY_PORT_CONFIG,
1527 &data);
1528 if (ret_val)
1529 goto out;
1531 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1532 ret_val = phy->ops.write_reg(hw,
1533 IGP01E1000_PHY_PORT_CONFIG,
1534 data);
1535 if (ret_val)
1536 goto out;
1538 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
1539 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
1540 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
1541 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
1542 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
1544 if (phy->type != e1000_phy_igp_3)
1545 goto out;
1548 * Call gig speed drop workaround on LPLU before accessing
1549 * any PHY registers
1551 if (hw->mac.type == e1000_ich8lan)
1552 e1000_gig_downshift_workaround_ich8lan(hw);
1554 /* When LPLU is enabled, we should disable SmartSpeed */
1555 ret_val = phy->ops.read_reg(hw,
1556 IGP01E1000_PHY_PORT_CONFIG,
1557 &data);
1558 if (ret_val)
1559 goto out;
1561 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1562 ret_val = phy->ops.write_reg(hw,
1563 IGP01E1000_PHY_PORT_CONFIG,
1564 data);
1567 out:
1568 return ret_val;
1572 * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
1573 * @hw: pointer to the HW structure
1574 * @bank: pointer to the variable that returns the active bank
1576 * Reads signature byte from the NVM using the flash access registers.
1577 * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
1579 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
1581 u32 eecd;
1582 struct e1000_nvm_info *nvm = &hw->nvm;
1583 u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
1584 u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
1585 u8 sig_byte = 0;
1586 s32 ret_val = E1000_SUCCESS;
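/*
 * The bank signature lives in bits 15:14 of word 0x13, so act_offset is
 * the byte address of that word's upper byte; bank1_offset is the byte
 * offset of the second bank (flash_bank_size is in words, hence the
 * conversion back to bytes), so act_offset + bank1_offset addresses the
 * same signature byte in bank 1.
 */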
1588 switch (hw->mac.type) {
1589 case e1000_ich8lan:
1590 case e1000_ich9lan:
1591 eecd = E1000_READ_REG(hw, E1000_EECD);
1592 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
1593 E1000_EECD_SEC1VAL_VALID_MASK) {
1594 if (eecd & E1000_EECD_SEC1VAL)
1595 *bank = 1;
1596 else
1597 *bank = 0;
1599 goto out;
1601 DEBUGOUT("Unable to determine valid NVM bank via EEC - "
1602 "reading flash signature\n");
1603 /* fall-thru */
1604 default:
1605 /* set bank to 0 in case flash read fails */
1606 *bank = 0;
1608 /* Check bank 0 */
1609 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
1610 &sig_byte);
1611 if (ret_val)
1612 goto out;
1613 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
1614 E1000_ICH_NVM_SIG_VALUE) {
1615 *bank = 0;
1616 goto out;
1619 /* Check bank 1 */
1620 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
1621 bank1_offset,
1622 &sig_byte);
1623 if (ret_val)
1624 goto out;
1625 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
1626 E1000_ICH_NVM_SIG_VALUE) {
1627 *bank = 1;
1628 goto out;
1631 DEBUGOUT("ERROR: No valid NVM bank present\n");
1632 ret_val = -E1000_ERR_NVM;
1633 break;
1635 out:
1636 return ret_val;
1640 * e1000_read_nvm_ich8lan - Read word(s) from the NVM
1641 * @hw: pointer to the HW structure
1642 * @offset: The offset (in words) of the word(s) to read.
1643 * @words: Size of data to read in words
1644 * @data: Pointer to the word(s) to read at offset.
1646 * Reads a word(s) from the NVM using the flash access registers.
1648 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
1649 u16 *data)
1651 struct e1000_nvm_info *nvm = &hw->nvm;
1652 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
1653 u32 act_offset;
1654 s32 ret_val = E1000_SUCCESS;
1655 u32 bank = 0;
1656 u16 i, word;
1658 DEBUGFUNC("e1000_read_nvm_ich8lan");
1660 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
1661 (words == 0)) {
1662 DEBUGOUT("nvm parameter(s) out of bounds\n");
1663 ret_val = -E1000_ERR_NVM;
1664 goto out;
1667 nvm->ops.acquire(hw);
1669 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
1670 if (ret_val != E1000_SUCCESS) {
1671 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
1672 bank = 0;
1675 act_offset = (bank) ? nvm->flash_bank_size : 0;
1676 act_offset += offset;
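/*
 * act_offset is a word index into the flash: bank 1 begins
 * flash_bank_size words after bank 0, and the caller's offset is added
 * on top of that base.
 */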
1678 ret_val = E1000_SUCCESS;
1679 for (i = 0; i < words; i++) {
1680 if ((dev_spec->shadow_ram) &&
1681 (dev_spec->shadow_ram[offset+i].modified)) {
1682 data[i] = dev_spec->shadow_ram[offset+i].value;
1683 } else {
1684 ret_val = e1000_read_flash_word_ich8lan(hw,
1685 act_offset + i,
1686 &word);
1687 if (ret_val)
1688 break;
1689 data[i] = word;
1693 nvm->ops.release(hw);
1695 out:
1696 if (ret_val)
1697 DEBUGOUT1("NVM read error: %d\n", ret_val);
1699 return ret_val;
1703 * e1000_flash_cycle_init_ich8lan - Initialize flash
1704 * @hw: pointer to the HW structure
1706 * This function does initial flash setup so that a new read/write/erase cycle
1707 * can be started.
1709 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
1711 union ich8_hws_flash_status hsfsts;
1712 s32 ret_val = -E1000_ERR_NVM;
1713 s32 i = 0;
1715 DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
1717 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
1719 /* Check if the flash descriptor is valid */
1720 if (hsfsts.hsf_status.fldesvalid == 0) {
1721 DEBUGOUT("Flash descriptor invalid. "
1722 "SW Sequencing must be used.");
1723 goto out;
1726 /* Clear FCERR and DAEL in hw status by writing 1 */
1727 hsfsts.hsf_status.flcerr = 1;
1728 hsfsts.hsf_status.dael = 1;
1730 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
1733 * Either we should have a hardware SPI cycle in progress
1734 * bit to check against, in order to start a new cycle or
1735 * FDONE bit should be changed in the hardware so that it
1736 * is 1 after hardware reset, which can then be used as an
1737 * indication whether a cycle is in progress or has been
1738 * completed.
1741 if (hsfsts.hsf_status.flcinprog == 0) {
1743 * There is no cycle running at present,
1744 * so we can start a cycle.
1745 * Begin by setting Flash Cycle Done.
1747 hsfsts.hsf_status.flcdone = 1;
1748 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
1749 ret_val = E1000_SUCCESS;
1750 } else {
1752 * Otherwise poll for some time so the current
1753 * cycle has a chance to end before giving up.
1755 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
1756 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
1757 ICH_FLASH_HSFSTS);
1758 if (hsfsts.hsf_status.flcinprog == 0) {
1759 ret_val = E1000_SUCCESS;
1760 break;
1762 usec_delay(1);
1764 if (ret_val == E1000_SUCCESS) {
1766 * Successfully waited for the previous cycle to finish,
1767 * now set the Flash Cycle Done.
1769 hsfsts.hsf_status.flcdone = 1;
1770 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
1771 hsfsts.regval);
1772 } else {
1773 DEBUGOUT("Flash controller busy, cannot get access");
1777 out:
1778 return ret_val;
1782 * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
1783 * @hw: pointer to the HW structure
1784 * @timeout: maximum time to wait for completion
1786 * This function starts a flash cycle and waits for its completion.
1788 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
1790 union ich8_hws_flash_ctrl hsflctl;
1791 union ich8_hws_flash_status hsfsts;
1792 s32 ret_val = -E1000_ERR_NVM;
1793 u32 i = 0;
1795 DEBUGFUNC("e1000_flash_cycle_ich8lan");
1797 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
1798 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
1799 hsflctl.hsf_ctrl.flcgo = 1;
1800 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
1802 /* wait till FDONE bit is set to 1 */
1803 do {
1804 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
1805 if (hsfsts.hsf_status.flcdone == 1)
1806 break;
1807 usec_delay(1);
1808 } while (i++ < timeout);
1810 if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0)
1811 ret_val = E1000_SUCCESS;
1813 return ret_val;
1817 * e1000_read_flash_word_ich8lan - Read word from flash
1818 * @hw: pointer to the HW structure
1819 * @offset: offset to data location
1820 * @data: pointer to the location for storing the data
1822 * Reads the flash word at offset into data. Offset is converted
1823 * to bytes before read.
1825 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
1826 u16 *data)
1828 s32 ret_val;
1830 DEBUGFUNC("e1000_read_flash_word_ich8lan");
1832 if (!data) {
1833 ret_val = -E1000_ERR_NVM;
1834 goto out;
1837 /* Must convert offset into bytes. */
1838 offset <<= 1;
1840 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 2, data);
1842 out:
1843 return ret_val;
1847 * e1000_read_flash_byte_ich8lan - Read byte from flash
1848 * @hw: pointer to the HW structure
1849 * @offset: The offset of the byte to read.
1850 * @data: Pointer to a byte to store the value read.
1852 * Reads a single byte from the NVM using the flash access registers.
1854 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
1855 u8 *data)
1857 s32 ret_val = E1000_SUCCESS;
1858 u16 word = 0;
1860 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
1861 if (ret_val)
1862 goto out;
1864 *data = (u8)word;
1866 out:
1867 return ret_val;
1871 * e1000_read_flash_data_ich8lan - Read byte or word from NVM
1872 * @hw: pointer to the HW structure
1873 * @offset: The offset (in bytes) of the byte or word to read.
1874 * @size: Size of data to read, 1=byte 2=word
1875 * @data: Pointer to the word to store the value read.
1877 * Reads a byte or word from the NVM using the flash access registers.
1879 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
1880 u8 size, u16 *data)
1882 union ich8_hws_flash_status hsfsts;
1883 union ich8_hws_flash_ctrl hsflctl;
1884 u32 flash_linear_addr;
1885 u32 flash_data = 0;
1886 s32 ret_val = -E1000_ERR_NVM;
1887 u8 count = 0;
1889 DEBUGFUNC("e1000_read_flash_data_ich8lan");
1891 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
1892 goto out;
1894 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
1895 hw->nvm.flash_base_addr;
1897 do {
1898 usec_delay(1);
1899 /* Steps */
1900 ret_val = e1000_flash_cycle_init_ich8lan(hw);
1901 if (ret_val != E1000_SUCCESS)
1902 break;
1904 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
1905 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
1906 hsflctl.hsf_ctrl.fldbcount = size - 1;
1907 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
1908 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
1910 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
1912 ret_val = e1000_flash_cycle_ich8lan(hw,
1913 ICH_FLASH_READ_COMMAND_TIMEOUT);
1916 * Check if FCERR is set to 1, if set to 1, clear it
1917 * and try the whole sequence a few more times, else
1918  * read in (shift in) the Flash Data0; the requested
1919  * data is in the least significant byte(s) of FDATA0.
1921 if (ret_val == E1000_SUCCESS) {
1922 flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
1923 if (size == 1)
1924 *data = (u8)(flash_data & 0x000000FF);
1925 else if (size == 2)
1926 *data = (u16)(flash_data & 0x0000FFFF);
1927 break;
1928 } else {
1930 * If we've gotten here, then things are probably
1931 * completely hosed, but if the error condition is
1932 * detected, it won't hurt to give it another try...
1933 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
1935 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
1936 ICH_FLASH_HSFSTS);
1937 if (hsfsts.hsf_status.flcerr == 1) {
1938 /* Repeat for some time before giving up. */
1939 continue;
1940 } else if (hsfsts.hsf_status.flcdone == 0) {
1941 DEBUGOUT("Timeout error - flash cycle "
1942 "did not complete.");
1943 break;
1946 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
1948 out:
1949 return ret_val;
1953 * e1000_write_nvm_ich8lan - Write word(s) to the NVM
1954 * @hw: pointer to the HW structure
1955 * @offset: The offset (in bytes) of the word(s) to write.
1956 * @words: Size of data to write in words
1957 * @data: Pointer to the word(s) to write at offset.
1959 * Writes a byte or word to the NVM using the flash access registers.
1961 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
1962 u16 *data)
1964 struct e1000_nvm_info *nvm = &hw->nvm;
1965 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
1966 s32 ret_val = E1000_SUCCESS;
1967 u16 i;
1969 DEBUGFUNC("e1000_write_nvm_ich8lan");
1971 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
1972 (words == 0)) {
1973 DEBUGOUT("nvm parameter(s) out of bounds\n");
1974 ret_val = -E1000_ERR_NVM;
1975 goto out;
1978 nvm->ops.acquire(hw);
1980 for (i = 0; i < words; i++) {
1981 dev_spec->shadow_ram[offset+i].modified = TRUE;
1982 dev_spec->shadow_ram[offset+i].value = data[i];
1985 nvm->ops.release(hw);
1987 out:
1988 return ret_val;
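/*
 * Note: e1000_write_nvm_ich8lan() above only marks words as modified in the
 * driver's shadow RAM; the flash itself is not touched until
 * e1000_update_nvm_checksum_ich8lan() commits the pending changes to the
 * inactive bank.
 */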
1992 * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
1993 * @hw: pointer to the HW structure
1995 * The NVM checksum is updated by calling the generic update_nvm_checksum,
1996 * which writes the checksum to the shadow ram. The changes in the shadow
1997 * ram are then committed to the EEPROM by processing each bank at a time
1998 * checking for the modified bit and writing only the pending changes.
1999 * After a successful commit, the shadow ram is cleared and is ready for
2000 * future writes.
2002 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2004 struct e1000_nvm_info *nvm = &hw->nvm;
2005 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2006 u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
2007 s32 ret_val;
2008 u16 data;
2010 DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
2012 ret_val = e1000_update_nvm_checksum_generic(hw);
2013 if (ret_val)
2014 goto out;
2016 if (nvm->type != e1000_nvm_flash_sw)
2017 goto out;
2019 nvm->ops.acquire(hw);
2022 * We're writing to the opposite bank so if we're on bank 1,
2023 * write to bank 0 etc. We also need to erase the segment that
2024 * is going to be written
2026 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2027 if (ret_val != E1000_SUCCESS) {
2028 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
2029 bank = 0;
2032 if (bank == 0) {
2033 new_bank_offset = nvm->flash_bank_size;
2034 old_bank_offset = 0;
2035 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
2036 if (ret_val)
2037 goto release;
2038 } else {
2039 old_bank_offset = nvm->flash_bank_size;
2040 new_bank_offset = 0;
2041 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
2042 if (ret_val)
2043 goto release;
2046 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
2048 * Determine whether to write the value stored
2049 * in the other NVM bank or a modified value stored
2050 * in the shadow RAM
2052 if (dev_spec->shadow_ram[i].modified) {
2053 data = dev_spec->shadow_ram[i].value;
2054 } else {
2055 ret_val = e1000_read_flash_word_ich8lan(hw, i +
2056 old_bank_offset,
2057 &data);
2058 if (ret_val)
2059 break;
2063 * If the word is 0x13, then make sure the signature bits
2064 * (15:14) are 11b until the commit has completed.
2065 * This will allow us to write 10b which indicates the
2066 * signature is valid. We want to do this after the write
2067 * has completed so that we don't mark the segment valid
2068 * while the write is still in progress
2070 if (i == E1000_ICH_NVM_SIG_WORD)
2071 data |= E1000_ICH_NVM_SIG_MASK;
2073 /* Convert offset to bytes. */
2074 act_offset = (i + new_bank_offset) << 1;
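/*
 * e.g. when committing to bank 1 (new_bank_offset == flash_bank_size),
 * shadow word i lands at byte offset (i + flash_bank_size) * 2.
 */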
2076 usec_delay(100);
2077 /* Write the bytes to the new bank. */
2078 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2079 act_offset,
2080 (u8)data);
2081 if (ret_val)
2082 break;
2084 usec_delay(100);
2085 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2086 act_offset + 1,
2087 (u8)(data >> 8));
2088 if (ret_val)
2089 break;
2093 * Don't bother writing the segment valid bits if sector
2094 * programming failed.
2096 if (ret_val) {
2097 DEBUGOUT("Flash commit failed.\n");
2098 goto release;
2102  * Finally validate the new segment by setting bits 15:14
2103  * to 10b in word 0x13. This can be done without an
2104  * erase as well, since these bits are 11b to start with
2105  * and we only need to change bit 14 to 0b.
2107 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
2108 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
2109 if (ret_val)
2110 goto release;
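/*
 * Clearing bit 14 with the 0xBFFF mask turns the signature bits 15:14
 * from 11b into 10b; only the high byte of word 0x13 changes, so just
 * that byte (at act_offset * 2 + 1) is rewritten below.
 */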
2112 data &= 0xBFFF;
2113 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2114 act_offset * 2 + 1,
2115 (u8)(data >> 8));
2116 if (ret_val)
2117 goto release;
2120 * And invalidate the previously valid segment by setting
2121  * its signature word (0x13) high byte to 0. This can be
2122 * done without an erase because flash erase sets all bits
2123 * to 1's. We can write 1's to 0's without an erase
2125 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
2126 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
2127 if (ret_val)
2128 goto release;
2130 /* Great! Everything worked, we can now clear the cached entries. */
2131 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
2132 dev_spec->shadow_ram[i].modified = FALSE;
2133 dev_spec->shadow_ram[i].value = 0xFFFF;
2136 release:
2137 nvm->ops.release(hw);
2140 * Reload the EEPROM, or else modifications will not appear
2141 * until after the next adapter reset.
2143 if (!ret_val) {
2144 nvm->ops.reload(hw);
2145 msec_delay(10);
2148 out:
2149 if (ret_val)
2150 DEBUGOUT1("NVM update error: %d\n", ret_val);
2152 return ret_val;
2156 * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
2157 * @hw: pointer to the HW structure
2159 * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
2160  * If the bit is 0, the EEPROM has been modified but the checksum was not
2161  * recalculated, in which case we need to calculate the checksum and set bit 6.
2163 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
2165 s32 ret_val = E1000_SUCCESS;
2166 u16 data;
2168 DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
2171 * Read 0x19 and check bit 6. If this bit is 0, the checksum
2172 * needs to be fixed. This bit is an indication that the NVM
2173 * was prepared by OEM software and did not calculate the
2174 * checksum...a likely scenario.
2176 ret_val = hw->nvm.ops.read(hw, 0x19, 1, &data);
2177 if (ret_val)
2178 goto out;
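/* Bit 6 of word 0x19 corresponds to the 0x40 mask tested below. */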
2180 if ((data & 0x40) == 0) {
2181 data |= 0x40;
2182 ret_val = hw->nvm.ops.write(hw, 0x19, 1, &data);
2183 if (ret_val)
2184 goto out;
2185 ret_val = hw->nvm.ops.update(hw);
2186 if (ret_val)
2187 goto out;
2190 ret_val = e1000_validate_nvm_checksum_generic(hw);
2192 out:
2193 return ret_val;
2197 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
2198 * @hw: pointer to the HW structure
2199 * @offset: The offset (in bytes) of the byte/word to read.
2200 * @size: Size of data to read, 1=byte 2=word
2201 * @data: The byte(s) to write to the NVM.
2203 * Writes one/two bytes to the NVM using the flash access registers.
2205 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2206 u8 size, u16 data)
2208 union ich8_hws_flash_status hsfsts;
2209 union ich8_hws_flash_ctrl hsflctl;
2210 u32 flash_linear_addr;
2211 u32 flash_data = 0;
2212 s32 ret_val = -E1000_ERR_NVM;
2213 u8 count = 0;
2215 DEBUGFUNC("e1000_write_ich8_data");
2217 if (size < 1 || size > 2 || data > size * 0xff ||
2218 offset > ICH_FLASH_LINEAR_ADDR_MASK)
2219 goto out;
2221 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2222 hw->nvm.flash_base_addr;
2224 do {
2225 usec_delay(1);
2226 /* Steps */
2227 ret_val = e1000_flash_cycle_init_ich8lan(hw);
2228 if (ret_val != E1000_SUCCESS)
2229 break;
2231 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2232 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2233 hsflctl.hsf_ctrl.fldbcount = size - 1;
2234 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
2235 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2237 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
2239 if (size == 1)
2240 flash_data = (u32)data & 0x00FF;
2241 else
2242 flash_data = (u32)data;
2244 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
2247  * Check if FCERR is set to 1; if so, clear it
2248  * and try the whole sequence a few more times, else we are done.
2250 ret_val = e1000_flash_cycle_ich8lan(hw,
2251 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
2252 if (ret_val == E1000_SUCCESS)
2253 break;
2256 * If we're here, then things are most likely
2257 * completely hosed, but if the error condition
2258 * is detected, it won't hurt to give it another
2259 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
2261 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2262 if (hsfsts.hsf_status.flcerr == 1)
2263 /* Repeat for some time before giving up. */
2264 continue;
2265 if (hsfsts.hsf_status.flcdone == 0) {
2266 DEBUGOUT("Timeout error - flash cycle "
2267 "did not complete.");
2268 break;
2270 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2272 out:
2273 return ret_val;
2277 * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
2278 * @hw: pointer to the HW structure
2279 * @offset: The index of the byte to read.
2280 * @data: The byte to write to the NVM.
2282 * Writes a single byte to the NVM using the flash access registers.
2284 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
2285 u8 data)
2287 u16 word = (u16)data;
2289 DEBUGFUNC("e1000_write_flash_byte_ich8lan");
2291 return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
2295 * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
2296 * @hw: pointer to the HW structure
2297 * @offset: The offset of the byte to write.
2298 * @byte: The byte to write to the NVM.
2300 * Writes a single byte to the NVM using the flash access registers.
2301 * Goes through a retry algorithm before giving up.
2303 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
2304 u32 offset, u8 byte)
2306 s32 ret_val;
2307 u16 program_retries;
2309 DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
2311 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
2312 if (ret_val == E1000_SUCCESS)
2313 goto out;
2315 for (program_retries = 0; program_retries < 100; program_retries++) {
2316 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
2317 usec_delay(100);
2318 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
2319 if (ret_val == E1000_SUCCESS)
2320 break;
2322 if (program_retries == 100) {
2323 ret_val = -E1000_ERR_NVM;
2324 goto out;
2327 out:
2328 return ret_val;
2332 * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
2333 * @hw: pointer to the HW structure
2334 * @bank: 0 for first bank, 1 for second bank, etc.
2336 * Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
2337 * bank N is 4096 * N + flash_reg_addr.
2339 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
2341 struct e1000_nvm_info *nvm = &hw->nvm;
2342 union ich8_hws_flash_status hsfsts;
2343 union ich8_hws_flash_ctrl hsflctl;
2344 u32 flash_linear_addr;
2345 /* bank size is in 16bit words - adjust to bytes */
2346 u32 flash_bank_size = nvm->flash_bank_size * 2;
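/*
 * e.g. with the 4 KB banks described above, flash_bank_size is 2048
 * 16-bit words, so flash_bank_size * 2 is 4096 bytes.
 */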
2347 s32 ret_val = E1000_SUCCESS;
2348 s32 count = 0;
2349 s32 j, iteration, sector_size;
2351 DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
2353 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2356 * Determine HW Sector size: Read BERASE bits of hw flash status
2357 * register
2358 * 00: The Hw sector is 256 bytes, hence we need to erase 16
2359 * consecutive sectors. The start index for the nth Hw sector
2360 * can be calculated as = bank * 4096 + n * 256
2361 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
2362 * The start index for the nth Hw sector can be calculated
2363 * as = bank * 4096
2364 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
2365 * (ich9 only, otherwise error condition)
2366 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
2368 switch (hsfsts.hsf_status.berasesz) {
2369 case 0:
2370 /* Hw sector size 256 */
2371 sector_size = ICH_FLASH_SEG_SIZE_256;
2372 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
2373 break;
2374 case 1:
2375 sector_size = ICH_FLASH_SEG_SIZE_4K;
2376 iteration = 1;
2377 break;
2378 case 2:
2379 sector_size = ICH_FLASH_SEG_SIZE_8K;
2380 iteration = 1;
2381 break;
2382 case 3:
2383 sector_size = ICH_FLASH_SEG_SIZE_64K;
2384 iteration = 1;
2385 break;
2386 default:
2387 ret_val = -E1000_ERR_NVM;
2388 goto out;
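/*
 * e.g. with 256-byte hardware sectors, a 4 KB bank takes 4096 / 256 = 16
 * erase cycles, while the larger sector sizes erase a bank in a single
 * iteration.
 */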
2391 /* Start with the base address, then add the sector offset. */
2392 flash_linear_addr = hw->nvm.flash_base_addr;
2393 flash_linear_addr += (bank) ? flash_bank_size : 0;
2395 for (j = 0; j < iteration ; j++) {
2396 do {
2397 /* Steps */
2398 ret_val = e1000_flash_cycle_init_ich8lan(hw);
2399 if (ret_val)
2400 goto out;
2403 * Write a value 11 (block Erase) in Flash
2404 * Cycle field in hw flash control
2406 hsflctl.regval = E1000_READ_FLASH_REG16(hw,
2407 ICH_FLASH_HSFCTL);
2408 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
2409 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
2410 hsflctl.regval);
2413 * Write the last 24 bits of an index within the
2414 * block into Flash Linear address field in Flash
2415 * Address.
2417 flash_linear_addr += (j * sector_size);
2418 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
2419 flash_linear_addr);
2421 ret_val = e1000_flash_cycle_ich8lan(hw,
2422 ICH_FLASH_ERASE_COMMAND_TIMEOUT);
2423 if (ret_val == E1000_SUCCESS)
2424 break;
2427 * Check if FCERR is set to 1. If 1,
2428 * clear it and try the whole sequence
2429 * a few more times else Done
2431 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
2432 ICH_FLASH_HSFSTS);
2433 if (hsfsts.hsf_status.flcerr == 1)
2434 /* repeat for some time before giving up */
2435 continue;
2436 else if (hsfsts.hsf_status.flcdone == 0)
2437 goto out;
2438 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
2441 out:
2442 return ret_val;
2446 * e1000_valid_led_default_ich8lan - Set the default LED settings
2447 * @hw: pointer to the HW structure
2448 * @data: Pointer to the LED settings
2450 * Reads the LED default settings from the NVM to data. If the NVM LED
2451  * setting is all 0's or F's, set the LED default to a valid LED default
2452 * setting.
2454 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
2456 s32 ret_val;
2458 DEBUGFUNC("e1000_valid_led_default_ich8lan");
2460 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
2461 if (ret_val) {
2462 DEBUGOUT("NVM Read Error\n");
2463 goto out;
2466 if (*data == ID_LED_RESERVED_0000 ||
2467 *data == ID_LED_RESERVED_FFFF)
2468 *data = ID_LED_DEFAULT_ICH8LAN;
2470 out:
2471 return ret_val;
2475 * e1000_id_led_init_pchlan - store LED configurations
2476 * @hw: pointer to the HW structure
2478 * PCH does not control LEDs via the LEDCTL register, rather it uses
2479 * the PHY LED configuration register.
2481 * PCH also does not have an "always on" or "always off" mode which
2482 * complicates the ID feature. Instead of using the "on" mode to indicate
2483 * in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
2484 * use "link_up" mode. The LEDs will still ID on request if there is no
2485 * link based on logic in e1000_led_[on|off]_pchlan().
2487 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
2489 struct e1000_mac_info *mac = &hw->mac;
2490 s32 ret_val;
2491 const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
2492 const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
2493 u16 data, i, temp, shift;
2495 DEBUGFUNC("e1000_id_led_init_pchlan");
2497 /* Get default ID LED modes */
2498 ret_val = hw->nvm.ops.valid_led_default(hw, &data);
2499 if (ret_val)
2500 goto out;
2502 mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
2503 mac->ledctl_mode1 = mac->ledctl_default;
2504 mac->ledctl_mode2 = mac->ledctl_default;
2506 for (i = 0; i < 4; i++) {
2507 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
2508 shift = (i * 5);
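/*
 * Each of the four ID LED fields read from the NVM word is 4 bits wide
 * (hence the i << 2 shift), while each LED configuration field written
 * to the PHY LED configuration register is 5 bits wide (hence shift = i * 5).
 */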
2509 switch (temp) {
2510 case ID_LED_ON1_DEF2:
2511 case ID_LED_ON1_ON2:
2512 case ID_LED_ON1_OFF2:
2513 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
2514 mac->ledctl_mode1 |= (ledctl_on << shift);
2515 break;
2516 case ID_LED_OFF1_DEF2:
2517 case ID_LED_OFF1_ON2:
2518 case ID_LED_OFF1_OFF2:
2519 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
2520 mac->ledctl_mode1 |= (ledctl_off << shift);
2521 break;
2522 default:
2523 /* Do nothing */
2524 break;
2526 switch (temp) {
2527 case ID_LED_DEF1_ON2:
2528 case ID_LED_ON1_ON2:
2529 case ID_LED_OFF1_ON2:
2530 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
2531 mac->ledctl_mode2 |= (ledctl_on << shift);
2532 break;
2533 case ID_LED_DEF1_OFF2:
2534 case ID_LED_ON1_OFF2:
2535 case ID_LED_OFF1_OFF2:
2536 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
2537 mac->ledctl_mode2 |= (ledctl_off << shift);
2538 break;
2539 default:
2540 /* Do nothing */
2541 break;
2545 out:
2546 return ret_val;
2550 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
2551 * @hw: pointer to the HW structure
2553  * ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
2554  * register, so the bus width is hard coded.
2556 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
2558 struct e1000_bus_info *bus = &hw->bus;
2559 s32 ret_val;
2561 DEBUGFUNC("e1000_get_bus_info_ich8lan");
2563 ret_val = e1000_get_bus_info_pcie_generic(hw);
2566 * ICH devices are "PCI Express"-ish. They have
2567 * a configuration space, but do not contain
2568 * PCI Express Capability registers, so bus width
2569 * must be hardcoded.
2571 if (bus->width == e1000_bus_width_unknown)
2572 bus->width = e1000_bus_width_pcie_x1;
2574 return ret_val;
2578 * e1000_reset_hw_ich8lan - Reset the hardware
2579 * @hw: pointer to the HW structure
2581 * Does a full reset of the hardware which includes a reset of the PHY and
2582 * MAC.
2584 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
2586 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2587 u16 reg;
2588 u32 ctrl, icr, kab;
2589 s32 ret_val;
2591 DEBUGFUNC("e1000_reset_hw_ich8lan");
2594 * Prevent the PCI-E bus from sticking if there is no TLP connection
2595 * on the last TLP read/write transaction when MAC is reset.
2597 ret_val = e1000_disable_pcie_master_generic(hw);
2598 if (ret_val)
2599 DEBUGOUT("PCI-E Master disable polling has failed.\n");
2601 DEBUGOUT("Masking off all interrupts\n");
2602 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
2605 * Disable the Transmit and Receive units. Then delay to allow
2606 * any pending transactions to complete before we hit the MAC
2607 * with the global reset.
2609 E1000_WRITE_REG(hw, E1000_RCTL, 0);
2610 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
2611 E1000_WRITE_FLUSH(hw);
2613 msec_delay(10);
2615 /* Workaround for ICH8 bit corruption issue in FIFO memory */
2616 if (hw->mac.type == e1000_ich8lan) {
2617 /* Set Tx and Rx buffer allocation to 8k apiece. */
2618 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
2619 /* Set Packet Buffer Size to 16k. */
2620 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
2623 if (hw->mac.type == e1000_pchlan) {
2624 /* Save the NVM K1 bit setting*/
2625 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &reg);
2626 if (ret_val)
2627 return ret_val;
2629 if (reg & E1000_NVM_K1_ENABLE)
2630 dev_spec->nvm_k1_enabled = TRUE;
2631 else
2632 dev_spec->nvm_k1_enabled = FALSE;
2635 ctrl = E1000_READ_REG(hw, E1000_CTRL);
2637 if (!hw->phy.ops.check_reset_block(hw) && !hw->phy.reset_disable) {
2638 /* Clear PHY Reset Asserted bit */
2639 if (hw->mac.type >= e1000_pchlan) {
2640 u32 status = E1000_READ_REG(hw, E1000_STATUS);
2641 E1000_WRITE_REG(hw, E1000_STATUS, status &
2642 ~E1000_STATUS_PHYRA);
2646 * PHY HW reset requires MAC CORE reset at the same
2647 * time to make sure the interface between MAC and the
2648 * external PHY is reset.
2650 ctrl |= E1000_CTRL_PHY_RST;
2652 ret_val = e1000_acquire_swflag_ich8lan(hw);
2653 DEBUGOUT("Issuing a global reset to ich8lan\n");
2654 E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
2655 msec_delay(20);
2657 if (!ret_val)
2658 e1000_release_swflag_ich8lan(hw);
2660 /* Perform any necessary post-reset workarounds */
2661 switch (hw->mac.type) {
2662 case e1000_pchlan:
2663 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2664 if (ret_val)
2665 goto out;
2666 break;
2667 default:
2668 break;
2671 if (ctrl & E1000_CTRL_PHY_RST)
2672 ret_val = hw->phy.ops.get_cfg_done(hw);
2674 if (hw->mac.type >= e1000_ich10lan) {
2675 e1000_lan_init_done_ich8lan(hw);
2676 } else {
2677 ret_val = e1000_get_auto_rd_done_generic(hw);
2678 if (ret_val) {
2680 * When auto config read does not complete, do not
2681 * return with an error. This can happen in situations
2682 * where there is no eeprom and prevents getting link.
2684 DEBUGOUT("Auto Read Done did not complete\n");
2687 if (hw->mac.type == e1000_pchlan)
2688 hw->phy.ops.read_reg(hw, BM_WUC, &reg);
2690 ret_val = e1000_sw_lcd_config_ich8lan(hw);
2691 if (ret_val)
2692 goto out;
2694 ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
2695 if (ret_val)
2696 goto out;
2698 * For PCH, this write will make sure that any noise
2699 * will be detected as a CRC error and be dropped rather than show up
2700 * as a bad packet to the DMA engine.
2702 if (hw->mac.type == e1000_pchlan)
2703 E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
2705 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
2706 icr = E1000_READ_REG(hw, E1000_ICR);
2708 kab = E1000_READ_REG(hw, E1000_KABGTXD);
2709 kab |= E1000_KABGTXD_BGSQLBIAS;
2710 E1000_WRITE_REG(hw, E1000_KABGTXD, kab);
2712 out:
2713 return ret_val;
2717 * e1000_init_hw_ich8lan - Initialize the hardware
2718 * @hw: pointer to the HW structure
2720 * Prepares the hardware for transmit and receive by doing the following:
2721 * - initialize hardware bits
2722 * - initialize LED identification
2723 * - setup receive address registers
2724 * - setup flow control
2725 * - setup transmit descriptors
2726 * - clear statistics
2728 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
2730 struct e1000_mac_info *mac = &hw->mac;
2731 u32 ctrl_ext, txdctl, snoop;
2732 s32 ret_val;
2733 u16 i;
2735 DEBUGFUNC("e1000_init_hw_ich8lan");
2737 e1000_initialize_hw_bits_ich8lan(hw);
2739 /* Initialize identification LED */
2740 ret_val = mac->ops.id_led_init(hw);
2741 if (ret_val)
2742 DEBUGOUT("Error initializing identification LED\n");
2743 /* This is not fatal and we should not stop init due to this */
2745 /* Setup the receive address. */
2746 e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
2748 /* Zero out the Multicast HASH table */
2749 DEBUGOUT("Zeroing the MTA\n");
2750 for (i = 0; i < mac->mta_reg_count; i++)
2751 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
2754 * The 82578 Rx buffer will stall if wakeup is enabled in host and
2755 * the ME. Reading the BM_WUC register will clear the host wakeup bit.
2756 * Reset the phy after disabling host wakeup to reset the Rx buffer.
2758 if (hw->phy.type == e1000_phy_82578) {
2759 hw->phy.ops.read_reg(hw, BM_WUC, &i);
2760 ret_val = e1000_phy_hw_reset_ich8lan(hw);
2761 if (ret_val)
2762 return ret_val;
2765 /* Setup link and flow control */
2766 ret_val = mac->ops.setup_link(hw);
2768 /* Set the transmit descriptor write-back policy for both queues */
2769 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
2770 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
2771 E1000_TXDCTL_FULL_TX_DESC_WB;
2772 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
2773 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
2774 E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
2775 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
2776 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
2777 E1000_TXDCTL_FULL_TX_DESC_WB;
2778 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
2779 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
2780 E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
2783 * ICH8 has opposite polarity of no_snoop bits.
2784 * By default, we should use snoop behavior.
2786 if (mac->type == e1000_ich8lan)
2787 snoop = PCIE_ICH8_SNOOP_ALL;
2788 else
2789 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
2790 e1000_set_pcie_no_snoop_generic(hw, snoop);
2792 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2793 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
2794 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
2797 * Clear all of the statistics registers (clear on read). It is
2798 * important that we do this after we have tried to establish link
2799 * because the symbol error count will increment wildly if there
2800 * is no link.
2802 e1000_clear_hw_cntrs_ich8lan(hw);
2804 return ret_val;
2807 * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
2808 * @hw: pointer to the HW structure
2810 * Sets/Clears required hardware bits necessary for correctly setting up the
2811 * hardware for transmit and receive.
2813 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
2815 u32 reg;
2817 DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
2819 /* Extended Device Control */
2820 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
2821 reg |= (1 << 22);
2822 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
2823 if (hw->mac.type >= e1000_pchlan)
2824 reg |= E1000_CTRL_EXT_PHYPDEN;
2825 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
2827 /* Transmit Descriptor Control 0 */
2828 reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
2829 reg |= (1 << 22);
2830 E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
2832 /* Transmit Descriptor Control 1 */
2833 reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
2834 reg |= (1 << 22);
2835 E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
2837 /* Transmit Arbitration Control 0 */
2838 reg = E1000_READ_REG(hw, E1000_TARC(0));
2839 if (hw->mac.type == e1000_ich8lan)
2840 reg |= (1 << 28) | (1 << 29);
2841 reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
2842 E1000_WRITE_REG(hw, E1000_TARC(0), reg);
2844 /* Transmit Arbitration Control 1 */
2845 reg = E1000_READ_REG(hw, E1000_TARC(1));
2846 if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
2847 reg &= ~(1 << 28);
2848 else
2849 reg |= (1 << 28);
2850 reg |= (1 << 24) | (1 << 26) | (1 << 30);
2851 E1000_WRITE_REG(hw, E1000_TARC(1), reg);
2853 /* Device Status */
2854 if (hw->mac.type == e1000_ich8lan) {
2855 reg = E1000_READ_REG(hw, E1000_STATUS);
2856 reg &= ~(1 << 31);
2857 E1000_WRITE_REG(hw, E1000_STATUS, reg);
2860 return;
2864 * e1000_setup_link_ich8lan - Setup flow control and link settings
2865 * @hw: pointer to the HW structure
2867 * Determines which flow control settings to use, then configures flow
2868 * control. Calls the appropriate media-specific link configuration
2869 * function. Assuming the adapter has a valid link partner, a valid link
2870 * should be established. Assumes the hardware has previously been reset
2871 * and the transmitter and receiver are not enabled.
2873 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
2875 s32 ret_val = E1000_SUCCESS;
2877 DEBUGFUNC("e1000_setup_link_ich8lan");
2879 if (hw->phy.ops.check_reset_block(hw))
2880 goto out;
2883 * ICH parts do not have a word in the NVM to determine
2884 * the default flow control setting, so we explicitly
2885 * set it to full.
2887 if (hw->fc.requested_mode == e1000_fc_default)
2888 hw->fc.requested_mode = e1000_fc_full;
2891 * Save off the requested flow control mode for use later. Depending
2892 * on the link partner's capabilities, we may or may not use this mode.
2894 hw->fc.current_mode = hw->fc.requested_mode;
2896 DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
2897 hw->fc.current_mode);
2899 /* Continue to configure the copper link. */
2900 ret_val = hw->mac.ops.setup_physical_interface(hw);
2901 if (ret_val)
2902 goto out;
2904 E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
2905 if ((hw->phy.type == e1000_phy_82578) ||
2906 (hw->phy.type == e1000_phy_82577)) {
2907 ret_val = hw->phy.ops.write_reg(hw,
2908 PHY_REG(BM_PORT_CTRL_PAGE, 27),
2909 hw->fc.pause_time);
2910 if (ret_val)
2911 goto out;
2914 ret_val = e1000_set_fc_watermarks_generic(hw);
2916 out:
2917 return ret_val;
2921 * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
2922 * @hw: pointer to the HW structure
2924  * Configures the Kumeran interface to the PHY to wait the appropriate time
2925  * when polling the PHY, then calls the generic setup_copper_link to finish
2926 * configuring the copper link.
2928 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
2930 u32 ctrl;
2931 s32 ret_val;
2932 u16 reg_data;
2934 DEBUGFUNC("e1000_setup_copper_link_ich8lan");
2936 ctrl = E1000_READ_REG(hw, E1000_CTRL);
2937 ctrl |= E1000_CTRL_SLU;
2938 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
2939 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
2942 * Set the mac to wait the maximum time between each iteration
2943 * and increase the max iterations when polling the phy;
2944 * this fixes erroneous timeouts at 10Mbps.
2946 ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
2947 0xFFFF);
2948 if (ret_val)
2949 goto out;
2950 ret_val = e1000_read_kmrn_reg_generic(hw,
2951 E1000_KMRNCTRLSTA_INBAND_PARAM,
2952 &reg_data);
2953 if (ret_val)
2954 goto out;
2955 reg_data |= 0x3F;
2956 ret_val = e1000_write_kmrn_reg_generic(hw,
2957 E1000_KMRNCTRLSTA_INBAND_PARAM,
2958 reg_data);
2959 if (ret_val)
2960 goto out;
2962 switch (hw->phy.type) {
2963 case e1000_phy_igp_3:
2964 ret_val = e1000_copper_link_setup_igp(hw);
2965 if (ret_val)
2966 goto out;
2967 break;
2968 case e1000_phy_bm:
2969 case e1000_phy_82578:
2970 ret_val = e1000_copper_link_setup_m88(hw);
2971 if (ret_val)
2972 goto out;
2973 break;
2974 case e1000_phy_82577:
2975 ret_val = e1000_copper_link_setup_82577(hw);
2976 if (ret_val)
2977 goto out;
2978 break;
2979 case e1000_phy_ife:
2980 ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
2981 &reg_data);
2982 if (ret_val)
2983 goto out;
2985 reg_data &= ~IFE_PMC_AUTO_MDIX;
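/*
 * hw->phy.mdix selects the MDI/MDI-X behavior for the IFE PHY:
 * 1 clears IFE_PMC_FORCE_MDIX, 2 sets it, and any other value
 * (including 0) falls back to IFE_PMC_AUTO_MDIX; presumably 1 forces
 * straight MDI and 2 forces crossover MDI-X.
 */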
2987 switch (hw->phy.mdix) {
2988 case 1:
2989 reg_data &= ~IFE_PMC_FORCE_MDIX;
2990 break;
2991 case 2:
2992 reg_data |= IFE_PMC_FORCE_MDIX;
2993 break;
2994 case 0:
2995 default:
2996 reg_data |= IFE_PMC_AUTO_MDIX;
2997 break;
2999 ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
3000 reg_data);
3001 if (ret_val)
3002 goto out;
3003 break;
3004 default:
3005 break;
3007 ret_val = e1000_setup_copper_link_generic(hw);
3009 out:
3010 return ret_val;
3014 * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
3015 * @hw: pointer to the HW structure
3016 * @speed: pointer to store current link speed
3017 * @duplex: pointer to store the current link duplex
3019 * Calls the generic get_speed_and_duplex to retrieve the current link
3020 * information and then calls the Kumeran lock loss workaround for links at
3021 * gigabit speeds.
3023 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
3024 u16 *duplex)
3026 s32 ret_val;
3028 DEBUGFUNC("e1000_get_link_up_info_ich8lan");
3030 ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
3031 if (ret_val)
3032 goto out;
3034 if ((hw->mac.type == e1000_ich8lan) &&
3035 (hw->phy.type == e1000_phy_igp_3) &&
3036 (*speed == SPEED_1000)) {
3037 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
3040 out:
3041 return ret_val;
3045 * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
3046 * @hw: pointer to the HW structure
3048 * Work-around for 82566 Kumeran PCS lock loss:
3049 * On link status change (i.e. PCI reset, speed change) and link is up and
3050 * speed is gigabit-
3051 * 0) if workaround is optionally disabled do nothing
3052 * 1) wait 1ms for Kumeran link to come up
3053 * 2) check Kumeran Diagnostic register PCS lock loss bit
3054 * 3) if not set the link is locked (all is good), otherwise...
3055 * 4) reset the PHY
3056 * 5) repeat up to 10 times
3057 * Note: this is only called for IGP3 copper when speed is 1gb.
3059 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
3061 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3062 u32 phy_ctrl;
3063 s32 ret_val = E1000_SUCCESS;
3064 u16 i, data;
3065 bool link;
3067 DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
3069 if (!(dev_spec->kmrn_lock_loss_workaround_enabled))
3070 goto out;
3073  * Make sure link is up before proceeding. If not, just return.
3074  * Attempting this while the link is negotiating has fouled up
3075  * link stability in the past.
3077 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
3078 if (!link) {
3079 ret_val = E1000_SUCCESS;
3080 goto out;
3083 for (i = 0; i < 10; i++) {
3084 /* read once to clear */
3085 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
3086 if (ret_val)
3087 goto out;
3088 /* and again to get new status */
3089 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
3090 if (ret_val)
3091 goto out;
3093 /* check for PCS lock */
3094 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) {
3095 ret_val = E1000_SUCCESS;
3096 goto out;
3099 /* Issue PHY reset */
3100 hw->phy.ops.reset(hw);
3101 msec_delay_irq(5);
3103 /* Disable GigE link negotiation */
3104 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3105 phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
3106 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3107 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3110 * Call gig speed drop workaround on Gig disable before accessing
3111 * any PHY registers
3113 e1000_gig_downshift_workaround_ich8lan(hw);
3115 /* unable to acquire PCS lock */
3116 ret_val = -E1000_ERR_PHY;
3118 out:
3119 return ret_val;
3123 * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
3124 * @hw: pointer to the HW structure
3125 * @state: boolean value used to set the current Kumeran workaround state
3127 * If ICH8, set the current Kumeran workaround state (enabled - TRUE
3128 * /disabled - FALSE).
3130 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
3131 bool state)
3133 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3135 DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
3137 if (hw->mac.type != e1000_ich8lan) {
3138 DEBUGOUT("Workaround applies to ICH8 only.\n");
3139 return;
3142 dev_spec->kmrn_lock_loss_workaround_enabled = state;
3144 return;
3148  * e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
3149 * @hw: pointer to the HW structure
3151 * Workaround for 82566 power-down on D3 entry:
3152 * 1) disable gigabit link
3153 * 2) write VR power-down enable
3154 * 3) read it back
3155 * Continue if successful, else issue LCD reset and repeat
3157 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
3159 u32 reg;
3160 u16 data;
3161 u8 retry = 0;
3163 DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
3165 if (hw->phy.type != e1000_phy_igp_3)
3166 goto out;
3168 /* Try the workaround twice (if needed) */
3169 do {
3170 /* Disable link */
3171 reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
3172 reg |= (E1000_PHY_CTRL_GBE_DISABLE |
3173 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3174 E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
3177 * Call gig speed drop workaround on Gig disable before
3178 * accessing any PHY registers
3180 if (hw->mac.type == e1000_ich8lan)
3181 e1000_gig_downshift_workaround_ich8lan(hw);
3183 /* Write VR power-down enable */
3184 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
3185 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3186 hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
3187 data | IGP3_VR_CTRL_MODE_SHUTDOWN);
3189 /* Read it back and test */
3190 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
3191 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3192 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
3193 break;
3195 /* Issue PHY reset and repeat at most one more time */
3196 reg = E1000_READ_REG(hw, E1000_CTRL);
3197 E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
3198 retry++;
3199 } while (retry);
3201 out:
3202 return;
3206 * e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
3207 * @hw: pointer to the HW structure
3209  * Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
3210 * LPLU, Gig disable, MDIC PHY reset):
3211 * 1) Set Kumeran Near-end loopback
3212 * 2) Clear Kumeran Near-end loopback
3213 * Should only be called for ICH8[m] devices with IGP_3 Phy.
3215 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
3217 s32 ret_val = E1000_SUCCESS;
3218 u16 reg_data;
3220 DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
3222 if ((hw->mac.type != e1000_ich8lan) ||
3223 (hw->phy.type != e1000_phy_igp_3))
3224 goto out;
3226 ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
3227 &reg_data);
3228 if (ret_val)
3229 goto out;
3230 reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
3231 ret_val = e1000_write_kmrn_reg_generic(hw,
3232 E1000_KMRNCTRLSTA_DIAG_OFFSET,
3233 reg_data);
3234 if (ret_val)
3235 goto out;
3236 reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
3237 ret_val = e1000_write_kmrn_reg_generic(hw,
3238 E1000_KMRNCTRLSTA_DIAG_OFFSET,
3239 reg_data);
3240 out:
3241 return;
3245 * e1000_disable_gig_wol_ich8lan - disable gig during WoL
3246 * @hw: pointer to the HW structure
3248 * During S0 to Sx transition, it is possible the link remains at gig
3249 * instead of negotiating to a lower speed. Before going to Sx, set
3250 * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation
3251 * to a lower speed.
3253 * Should only be called for applicable parts.
3255 void e1000_disable_gig_wol_ich8lan(struct e1000_hw *hw)
3257 u32 phy_ctrl;
3259 switch (hw->mac.type) {
3260 case e1000_ich8lan:
3261 case e1000_ich9lan:
3262 case e1000_ich10lan:
3263 case e1000_pchlan:
3264 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3265 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU |
3266 E1000_PHY_CTRL_GBE_DISABLE;
3267 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3269 if (hw->mac.type == e1000_pchlan)
3270 e1000_phy_hw_reset_ich8lan(hw);
3271 default:
3272 break;
3275 return;
3279 * e1000_cleanup_led_ich8lan - Restore the default LED operation
3280 * @hw: pointer to the HW structure
3282 * Return the LED back to the default configuration.
3284 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
3286 DEBUGFUNC("e1000_cleanup_led_ich8lan");
3288 if (hw->phy.type == e1000_phy_ife)
3289 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3290 0);
3292 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
3293 return E1000_SUCCESS;
3297 * e1000_led_on_ich8lan - Turn LEDs on
3298 * @hw: pointer to the HW structure
3300 * Turn on the LEDs.
3302 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
3304 DEBUGFUNC("e1000_led_on_ich8lan");
3306 if (hw->phy.type == e1000_phy_ife)
3307 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3308 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
3310 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
3311 return E1000_SUCCESS;
3315 * e1000_led_off_ich8lan - Turn LEDs off
3316 * @hw: pointer to the HW structure
3318 * Turn off the LEDs.
3320 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
3322 DEBUGFUNC("e1000_led_off_ich8lan");
3324 if (hw->phy.type == e1000_phy_ife)
3325 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3326 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
3328 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
3329 return E1000_SUCCESS;
3333 * e1000_setup_led_pchlan - Configures SW controllable LED
3334 * @hw: pointer to the HW structure
3336 * This prepares the SW controllable LED for use.
3338 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
3340 DEBUGFUNC("e1000_setup_led_pchlan");
3342 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
3343 (u16)hw->mac.ledctl_mode1);
3347 * e1000_cleanup_led_pchlan - Restore the default LED operation
3348 * @hw: pointer to the HW structure
3350 * Return the LED back to the default configuration.
3352 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
3354 DEBUGFUNC("e1000_cleanup_led_pchlan");
3356 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
3357 (u16)hw->mac.ledctl_default);
3361 * e1000_led_on_pchlan - Turn LEDs on
3362 * @hw: pointer to the HW structure
3364 * Turn on the LEDs.
3366 static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
3368 u16 data = (u16)hw->mac.ledctl_mode2;
3369 u32 i, led;
3371 DEBUGFUNC("e1000_led_on_pchlan");
3374 * If no link, then turn LED on by setting the invert bit
3375  * for each LED whose mode is "link_up" in ledctl_mode2.
3377 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
3378 for (i = 0; i < 3; i++) {
3379 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
3380 if ((led & E1000_PHY_LED0_MODE_MASK) !=
3381 E1000_LEDCTL_MODE_LINK_UP)
3382 continue;
3383 if (led & E1000_PHY_LED0_IVRT)
3384 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
3385 else
3386 data |= (E1000_PHY_LED0_IVRT << (i * 5));
3390 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
3394 * e1000_led_off_pchlan - Turn LEDs off
3395 * @hw: pointer to the HW structure
3397 * Turn off the LEDs.
3399 static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
3401 u16 data = (u16)hw->mac.ledctl_mode1;
3402 u32 i, led;
3404 DEBUGFUNC("e1000_led_off_pchlan");
3407 * If no link, then turn LED off by clearing the invert bit
3408  * for each LED whose mode is "link_up" in ledctl_mode1.
3410 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
3411 for (i = 0; i < 3; i++) {
3412 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
3413 if ((led & E1000_PHY_LED0_MODE_MASK) !=
3414 E1000_LEDCTL_MODE_LINK_UP)
3415 continue;
3416 if (led & E1000_PHY_LED0_IVRT)
3417 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
3418 else
3419 data |= (E1000_PHY_LED0_IVRT << (i * 5));
3423 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
3427 * e1000_get_cfg_done_ich8lan - Read config done bit
3428 * @hw: pointer to the HW structure
3430 * Read the management control register for the config done bit for
3431 * completion status. NOTE: silicon which is EEPROM-less will fail trying
3432  * to read the config done bit, so the error is *ONLY* logged and the function
3433  * returns E1000_SUCCESS.  If we were to return an error, EEPROM-less silicon
3434 * would not be able to be reset or change link.
3436 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
3438 s32 ret_val = E1000_SUCCESS;
3439 u32 bank = 0;
3441 if (hw->mac.type >= e1000_pchlan) {
3442 u32 status = E1000_READ_REG(hw, E1000_STATUS);
3444 if (status & E1000_STATUS_PHYRA)
3445 E1000_WRITE_REG(hw, E1000_STATUS, status &
3446 ~E1000_STATUS_PHYRA);
3447 else
3448 DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
3451 e1000_get_cfg_done_generic(hw);
3453 /* If EEPROM is not marked present, init the IGP 3 PHY manually */
3454 if (hw->mac.type <= e1000_ich9lan) {
3455 if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
3456 (hw->phy.type == e1000_phy_igp_3)) {
3457 e1000_phy_init_script_igp3(hw);
3459 } else {
3460 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
3461 /* Maybe we should do a basic PHY config */
3462 DEBUGOUT("EEPROM not present\n");
3463 ret_val = -E1000_ERR_CONFIG;
3467 return ret_val;
3471 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
3472 * @hw: pointer to the HW structure
3474  * In the case of a PHY power down to save power, to turn off link during a
3475  * driver unload, or when wake on LAN is not enabled, remove the link.
3477 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
3479 /* If the management interface is not enabled, then power down */
3480 if (!(hw->mac.ops.check_mng_mode(hw) ||
3481 hw->phy.ops.check_reset_block(hw)))
3482 e1000_power_down_phy_copper(hw);
3484 return;
3488 * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
3489 * @hw: pointer to the HW structure
3491 * Clears hardware counters specific to the silicon family and calls
3492 * clear_hw_cntrs_generic to clear all general purpose counters.
3494 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
3496 u16 phy_data;
3498 DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
3500 e1000_clear_hw_cntrs_base_generic(hw);
3502 E1000_READ_REG(hw, E1000_ALGNERRC);
3503 E1000_READ_REG(hw, E1000_RXERRC);
3504 E1000_READ_REG(hw, E1000_TNCRS);
3505 E1000_READ_REG(hw, E1000_CEXTERR);
3506 E1000_READ_REG(hw, E1000_TSCTC);
3507 E1000_READ_REG(hw, E1000_TSCTFC);
3509 E1000_READ_REG(hw, E1000_MGTPRC);
3510 E1000_READ_REG(hw, E1000_MGTPDC);
3511 E1000_READ_REG(hw, E1000_MGTPTC);
3513 E1000_READ_REG(hw, E1000_IAC);
3514 E1000_READ_REG(hw, E1000_ICRXOC);
3516 /* Clear PHY statistics registers */
3517 if ((hw->phy.type == e1000_phy_82578) ||
3518 (hw->phy.type == e1000_phy_82577)) {
3519 hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data);
3520 hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data);
3521 hw->phy.ops.read_reg(hw, HV_ECOL_UPPER, &phy_data);
3522 hw->phy.ops.read_reg(hw, HV_ECOL_LOWER, &phy_data);
3523 hw->phy.ops.read_reg(hw, HV_MCC_UPPER, &phy_data);
3524 hw->phy.ops.read_reg(hw, HV_MCC_LOWER, &phy_data);
3525 hw->phy.ops.read_reg(hw, HV_LATECOL_UPPER, &phy_data);
3526 hw->phy.ops.read_reg(hw, HV_LATECOL_LOWER, &phy_data);
3527 hw->phy.ops.read_reg(hw, HV_COLC_UPPER, &phy_data);
3528 hw->phy.ops.read_reg(hw, HV_COLC_LOWER, &phy_data);
3529 hw->phy.ops.read_reg(hw, HV_DC_UPPER, &phy_data);
3530 hw->phy.ops.read_reg(hw, HV_DC_LOWER, &phy_data);
3531 hw->phy.ops.read_reg(hw, HV_TNCRS_UPPER, &phy_data);
3532 hw->phy.ops.read_reg(hw, HV_TNCRS_LOWER, &phy_data);