drivers/net/e1000e/lib.c
1 /*******************************************************************************
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2008 Intel Corporation.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *******************************************************************************/
29 #include <linux/netdevice.h>
30 #include <linux/ethtool.h>
31 #include <linux/delay.h>
32 #include <linux/pci.h>
34 #include "e1000.h"
36 enum e1000_mng_mode {
37 e1000_mng_mode_none = 0,
38 e1000_mng_mode_asf,
39 e1000_mng_mode_pt,
40 e1000_mng_mode_ipmi,
41 e1000_mng_mode_host_if_only
44 #define E1000_FACTPS_MNGCG 0x20000000
46 /* Intel(R) Active Management Technology signature */
47 #define E1000_IAMT_SIGNATURE 0x544D4149
49 /**
50 * e1000e_get_bus_info_pcie - Get PCIe bus information
51 * @hw: pointer to the HW structure
53 * Determines and stores the system bus information for a particular
54 * network interface. The following bus information is determined and stored:
55 * bus speed, bus width, type (PCIe), and PCIe function.
56 **/
57 s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
59 struct e1000_bus_info *bus = &hw->bus;
60 struct e1000_adapter *adapter = hw->adapter;
61 u32 status;
62 u16 pcie_link_status, pci_header_type, cap_offset;
64 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
65 if (!cap_offset) {
66 bus->width = e1000_bus_width_unknown;
67 } else {
68 pci_read_config_word(adapter->pdev,
69 cap_offset + PCIE_LINK_STATUS,
70 &pcie_link_status);
71 bus->width = (enum e1000_bus_width)((pcie_link_status &
72 PCIE_LINK_WIDTH_MASK) >>
73 PCIE_LINK_WIDTH_SHIFT);
76 pci_read_config_word(adapter->pdev, PCI_HEADER_TYPE_REGISTER,
77 &pci_header_type);
78 if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
79 status = er32(STATUS);
80 bus->func = (status & E1000_STATUS_FUNC_MASK)
81 >> E1000_STATUS_FUNC_SHIFT;
82 } else {
83 bus->func = 0;
86 return 0;
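/*
 * Worked example (added for illustration, not part of the original file):
 * assuming the conventional PCIe Link Status layout in which the negotiated
 * link width sits in bits 9:4 (i.e. PCIE_LINK_WIDTH_MASK = 0x3F0 and
 * PCIE_LINK_WIDTH_SHIFT = 4), a pcie_link_status value of 0x1041 gives
 *
 *	(0x1041 & 0x3F0) >> 4 == 4
 *
 * so a four-lane link is what ends up stored in bus->width.
 */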
89 /**
90 * e1000e_write_vfta - Write value to VLAN filter table
91 * @hw: pointer to the HW structure
92 * @offset: register offset in VLAN filter table
93 * @value: register value written to VLAN filter table
95 * Writes value at the given offset in the register array which stores
96 * the VLAN filter table.
97 **/
98 void e1000e_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
100 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
101 e1e_flush();
105  * e1000e_init_rx_addrs - Initialize receive addresses
106 * @hw: pointer to the HW structure
107 * @rar_count: receive address registers
109  * Sets up the receive address registers by setting the base receive address
110  * register to the device's MAC address and clearing all the other receive
111 * address registers to 0.
113 void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
115 u32 i;
117 /* Setup the receive address */
118 hw_dbg(hw, "Programming MAC Address into RAR[0]\n");
120 e1000e_rar_set(hw, hw->mac.addr, 0);
122 /* Zero out the other (rar_entry_count - 1) receive addresses */
123 hw_dbg(hw, "Clearing RAR[1-%u]\n", rar_count-1);
124 for (i = 1; i < rar_count; i++) {
125 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0);
126 e1e_flush();
127 E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((i << 1) + 1), 0);
128 e1e_flush();
133 * e1000e_rar_set - Set receive address register
134 * @hw: pointer to the HW structure
135 * @addr: pointer to the receive address
136 * @index: receive address array register
138 * Sets the receive address array register at index to the address passed
139 * in by addr.
141 void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
143 u32 rar_low, rar_high;
146 * HW expects these in little endian so we reverse the byte order
147 * from network order (big endian) to little endian
149 rar_low = ((u32) addr[0] |
150 ((u32) addr[1] << 8) |
151 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
153 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
155 rar_high |= E1000_RAH_AV;
157 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low);
158 E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high);
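/*
 * Worked example (illustration only): for the hypothetical address
 * 00:1B:21:3C:4D:5E the packing above yields
 *
 *	rar_low  = 0x3C211B00	(addr[3..0], lowest byte in the lowest bits)
 *	rar_high = 0x00005E4D	(addr[5..4]) before the AV bit is OR'ed in
 *
 * so RAL receives the first four bytes in little-endian order and RAH
 * receives the last two plus the Address Valid flag.
 */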
162 * e1000_hash_mc_addr - Generate a multicast hash value
163 * @hw: pointer to the HW structure
164 * @mc_addr: pointer to a multicast address
166 * Generates a multicast address hash value which is used to determine
167 * the multicast filter table array address and new table value. See
168 * e1000_mta_set_generic()
170 static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
172 u32 hash_value, hash_mask;
173 u8 bit_shift = 0;
175 /* Register count multiplied by bits per register */
176 hash_mask = (hw->mac.mta_reg_count * 32) - 1;
179 * For a mc_filter_type of 0, bit_shift is the number of left-shifts
180 * where 0xFF would still fall within the hash mask.
182 while (hash_mask >> bit_shift != 0xFF)
183 bit_shift++;
186 * The portion of the address that is used for the hash table
187 * is determined by the mc_filter_type setting.
188 * The algorithm is such that there is a total of 8 bits of shifting.
189 * The bit_shift for a mc_filter_type of 0 represents the number of
190 * left-shifts where the MSB of mc_addr[5] would still fall within
191 * the hash_mask. Case 0 does this exactly. Since there are a total
192 * of 8 bits of shifting, then mc_addr[4] will shift right the
193 * remaining number of bits. Thus 8 - bit_shift. The rest of the
194 * cases are a variation of this algorithm...essentially raising the
195 * number of bits to shift mc_addr[5] left, while still keeping the
196 * 8-bit shifting total.
198 * For example, given the following Destination MAC Address and an
199 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
200 * we can see that the bit_shift for case 0 is 4. These are the hash
201 * values resulting from each mc_filter_type...
202 * [0] [1] [2] [3] [4] [5]
203 * 01 AA 00 12 34 56
204 * LSB MSB
206 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
207 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
208  * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x58D
209 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
211 switch (hw->mac.mc_filter_type) {
212 default:
213 case 0:
214 break;
215 case 1:
216 bit_shift += 1;
217 break;
218 case 2:
219 bit_shift += 2;
220 break;
221 case 3:
222 bit_shift += 4;
223 break;
226 hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
227 (((u16) mc_addr[5]) << bit_shift)));
229 return hash_value;
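/*
 * Illustration (added, not part of the original file): the hash value
 * computed above is later split into a register index and a bit index by
 * e1000e_update_mc_addr_list_generic().  Taking case 0 from the worked
 * example (hash_value = 0x563) with a 128-register MTA:
 *
 *	hash_reg = (0x563 >> 5) & (128 - 1) = 43
 *	hash_bit =  0x563 & 0x1F            = 3
 *
 * so bit 3 of MTA register 43 gets set for that multicast address.
 */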
233 * e1000e_update_mc_addr_list_generic - Update Multicast addresses
234 * @hw: pointer to the HW structure
235 * @mc_addr_list: array of multicast addresses to program
236 * @mc_addr_count: number of multicast addresses to program
237 * @rar_used_count: the first RAR register free to program
238 * @rar_count: total number of supported Receive Address Registers
240 * Updates the Receive Address Registers and Multicast Table Array.
241 * The caller must have a packed mc_addr_list of multicast addresses.
242 * The parameter rar_count will usually be hw->mac.rar_entry_count
243 * unless there are workarounds that change this.
245 void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
246 u8 *mc_addr_list, u32 mc_addr_count,
247 u32 rar_used_count, u32 rar_count)
249 u32 i;
250 u32 *mcarray = kzalloc(hw->mac.mta_reg_count * sizeof(u32), GFP_ATOMIC);
252 if (!mcarray) {
253 printk(KERN_ERR "multicast array memory allocation failed\n");
254 return;
258 * Load the first set of multicast addresses into the exact
259 * filters (RAR). If there are not enough to fill the RAR
260 * array, clear the filters.
262 for (i = rar_used_count; i < rar_count; i++) {
263 if (mc_addr_count) {
264 e1000e_rar_set(hw, mc_addr_list, i);
265 mc_addr_count--;
266 mc_addr_list += ETH_ALEN;
267 } else {
268 E1000_WRITE_REG_ARRAY(hw, E1000_RA, i << 1, 0);
269 e1e_flush();
270 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1) + 1, 0);
271 e1e_flush();
275 /* Load any remaining multicast addresses into the hash table. */
276 for (; mc_addr_count > 0; mc_addr_count--) {
277 u32 hash_value, hash_reg, hash_bit, mta;
278 hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
279 hw_dbg(hw, "Hash value = 0x%03X\n", hash_value);
280 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
281 hash_bit = hash_value & 0x1F;
282 mta = (1 << hash_bit);
283 mcarray[hash_reg] |= mta;
284 mc_addr_list += ETH_ALEN;
287 /* write the hash table completely */
288 for (i = 0; i < hw->mac.mta_reg_count; i++)
289 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, mcarray[i]);
291 e1e_flush();
292 kfree(mcarray);
296 * e1000e_clear_hw_cntrs_base - Clear base hardware counters
297 * @hw: pointer to the HW structure
299 * Clears the base hardware counters by reading the counter registers.
301 void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
303 u32 temp;
305 temp = er32(CRCERRS);
306 temp = er32(SYMERRS);
307 temp = er32(MPC);
308 temp = er32(SCC);
309 temp = er32(ECOL);
310 temp = er32(MCC);
311 temp = er32(LATECOL);
312 temp = er32(COLC);
313 temp = er32(DC);
314 temp = er32(SEC);
315 temp = er32(RLEC);
316 temp = er32(XONRXC);
317 temp = er32(XONTXC);
318 temp = er32(XOFFRXC);
319 temp = er32(XOFFTXC);
320 temp = er32(FCRUC);
321 temp = er32(GPRC);
322 temp = er32(BPRC);
323 temp = er32(MPRC);
324 temp = er32(GPTC);
325 temp = er32(GORCL);
326 temp = er32(GORCH);
327 temp = er32(GOTCL);
328 temp = er32(GOTCH);
329 temp = er32(RNBC);
330 temp = er32(RUC);
331 temp = er32(RFC);
332 temp = er32(ROC);
333 temp = er32(RJC);
334 temp = er32(TORL);
335 temp = er32(TORH);
336 temp = er32(TOTL);
337 temp = er32(TOTH);
338 temp = er32(TPR);
339 temp = er32(TPT);
340 temp = er32(MPTC);
341 temp = er32(BPTC);
345 * e1000e_check_for_copper_link - Check for link (Copper)
346 * @hw: pointer to the HW structure
348  * Checks to see if the link status of the hardware has changed. If a
349 * change in link status has been detected, then we read the PHY registers
350 * to get the current speed/duplex if link exists.
352 s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
354 struct e1000_mac_info *mac = &hw->mac;
355 s32 ret_val;
356 bool link;
359 * We only want to go out to the PHY registers to see if Auto-Neg
360 * has completed and/or if our link status has changed. The
361 * get_link_status flag is set upon receiving a Link Status
362 * Change or Rx Sequence Error interrupt.
364 if (!mac->get_link_status)
365 return 0;
368 * First we want to see if the MII Status Register reports
369 * link. If so, then we want to get the current speed/duplex
370 * of the PHY.
372 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
373 if (ret_val)
374 return ret_val;
376 if (!link)
377 return ret_val; /* No link detected */
379 mac->get_link_status = 0;
381 if (hw->phy.type == e1000_phy_82578) {
382 ret_val = e1000_link_stall_workaround_hv(hw);
383 if (ret_val)
384 return ret_val;
388 * Check if there was DownShift, must be checked
389 * immediately after link-up
391 e1000e_check_downshift(hw);
394 * If we are forcing speed/duplex, then we simply return since
395 * we have already determined whether we have link or not.
397 if (!mac->autoneg) {
398 ret_val = -E1000_ERR_CONFIG;
399 return ret_val;
403 * Auto-Neg is enabled. Auto Speed Detection takes care
404 * of MAC speed/duplex configuration. So we only need to
405 * configure Collision Distance in the MAC.
407 e1000e_config_collision_dist(hw);
410 * Configure Flow Control now that Auto-Neg has completed.
411 * First, we need to restore the desired flow control
412 * settings because we may have had to re-autoneg with a
413 * different link partner.
415 ret_val = e1000e_config_fc_after_link_up(hw);
416 if (ret_val) {
417 hw_dbg(hw, "Error configuring flow control\n");
420 return ret_val;
424 * e1000e_check_for_fiber_link - Check for link (Fiber)
425 * @hw: pointer to the HW structure
427 * Checks for link up on the hardware. If link is not up and we have
428 * a signal, then we need to force link up.
430 s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
432 struct e1000_mac_info *mac = &hw->mac;
433 u32 rxcw;
434 u32 ctrl;
435 u32 status;
436 s32 ret_val;
438 ctrl = er32(CTRL);
439 status = er32(STATUS);
440 rxcw = er32(RXCW);
443 * If we don't have link (auto-negotiation failed or link partner
444 * cannot auto-negotiate), the cable is plugged in (we have signal),
445 * and our link partner is not trying to auto-negotiate with us (we
446 * are receiving idles or data), we need to force link up. We also
447 * need to give auto-negotiation time to complete, in case the cable
448 * was just plugged in. The autoneg_failed flag does this.
450 /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
451 if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) &&
452 (!(rxcw & E1000_RXCW_C))) {
453 if (mac->autoneg_failed == 0) {
454 mac->autoneg_failed = 1;
455 return 0;
457 hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n");
459 /* Disable auto-negotiation in the TXCW register */
460 ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
462 /* Force link-up and also force full-duplex. */
463 ctrl = er32(CTRL);
464 ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
465 ew32(CTRL, ctrl);
467 /* Configure Flow Control after forcing link up. */
468 ret_val = e1000e_config_fc_after_link_up(hw);
469 if (ret_val) {
470 hw_dbg(hw, "Error configuring flow control\n");
471 return ret_val;
473 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
475 * If we are forcing link and we are receiving /C/ ordered
476 * sets, re-enable auto-negotiation in the TXCW register
477 * and disable forced link in the Device Control register
478 * in an attempt to auto-negotiate with our link partner.
480 hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n");
481 ew32(TXCW, mac->txcw);
482 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
484 mac->serdes_has_link = true;
487 return 0;
491 * e1000e_check_for_serdes_link - Check for link (Serdes)
492 * @hw: pointer to the HW structure
494 * Checks for link up on the hardware. If link is not up and we have
495 * a signal, then we need to force link up.
497 s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
499 struct e1000_mac_info *mac = &hw->mac;
500 u32 rxcw;
501 u32 ctrl;
502 u32 status;
503 s32 ret_val;
505 ctrl = er32(CTRL);
506 status = er32(STATUS);
507 rxcw = er32(RXCW);
510 * If we don't have link (auto-negotiation failed or link partner
511 * cannot auto-negotiate), and our link partner is not trying to
512 * auto-negotiate with us (we are receiving idles or data),
513 * we need to force link up. We also need to give auto-negotiation
514 * time to complete.
516 /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
517 if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) {
518 if (mac->autoneg_failed == 0) {
519 mac->autoneg_failed = 1;
520 return 0;
522 hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n");
524 /* Disable auto-negotiation in the TXCW register */
525 ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
527 /* Force link-up and also force full-duplex. */
528 ctrl = er32(CTRL);
529 ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
530 ew32(CTRL, ctrl);
532 /* Configure Flow Control after forcing link up. */
533 ret_val = e1000e_config_fc_after_link_up(hw);
534 if (ret_val) {
535 hw_dbg(hw, "Error configuring flow control\n");
536 return ret_val;
538 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
540 * If we are forcing link and we are receiving /C/ ordered
541 * sets, re-enable auto-negotiation in the TXCW register
542 * and disable forced link in the Device Control register
543 * in an attempt to auto-negotiate with our link partner.
545 hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n");
546 ew32(TXCW, mac->txcw);
547 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
549 mac->serdes_has_link = true;
550 } else if (!(E1000_TXCW_ANE & er32(TXCW))) {
552 * If we force link for non-auto-negotiation switch, check
553 * link status based on MAC synchronization for internal
554 * serdes media type.
556 /* SYNCH bit and IV bit are sticky. */
557 udelay(10);
558 rxcw = er32(RXCW);
559 if (rxcw & E1000_RXCW_SYNCH) {
560 if (!(rxcw & E1000_RXCW_IV)) {
561 mac->serdes_has_link = true;
562 hw_dbg(hw, "SERDES: Link up - forced.\n");
564 } else {
565 mac->serdes_has_link = false;
566 hw_dbg(hw, "SERDES: Link down - force failed.\n");
570 if (E1000_TXCW_ANE & er32(TXCW)) {
571 status = er32(STATUS);
572 if (status & E1000_STATUS_LU) {
573 /* SYNCH bit and IV bit are sticky, so reread rxcw. */
574 udelay(10);
575 rxcw = er32(RXCW);
576 if (rxcw & E1000_RXCW_SYNCH) {
577 if (!(rxcw & E1000_RXCW_IV)) {
578 mac->serdes_has_link = true;
579 hw_dbg(hw, "SERDES: Link up - autoneg "
580 "completed sucessfully.\n");
581 } else {
582 mac->serdes_has_link = false;
583 hw_dbg(hw, "SERDES: Link down - invalid"
584 "codewords detected in autoneg.\n");
586 } else {
587 mac->serdes_has_link = false;
588 hw_dbg(hw, "SERDES: Link down - no sync.\n");
590 } else {
591 mac->serdes_has_link = false;
592 hw_dbg(hw, "SERDES: Link down - autoneg failed\n");
596 return 0;
600 * e1000_set_default_fc_generic - Set flow control default values
601 * @hw: pointer to the HW structure
603 * Read the EEPROM for the default values for flow control and store the
604 * values.
606 static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
608 s32 ret_val;
609 u16 nvm_data;
612 * Read and store word 0x0F of the EEPROM. This word contains bits
613 * that determine the hardware's default PAUSE (flow control) mode,
614 * a bit that determines whether the HW defaults to enabling or
615 * disabling auto-negotiation, and the direction of the
616 * SW defined pins. If there is no SW over-ride of the flow
617 * control setting, then the variable hw->fc will
618 * be initialized based on a value in the EEPROM.
620 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
622 if (ret_val) {
623 hw_dbg(hw, "NVM Read Error\n");
624 return ret_val;
627 if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
628 hw->fc.requested_mode = e1000_fc_none;
629 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
630 NVM_WORD0F_ASM_DIR)
631 hw->fc.requested_mode = e1000_fc_tx_pause;
632 else
633 hw->fc.requested_mode = e1000_fc_full;
635 return 0;
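/*
 * Illustration (added, not in the original file): the decision above can be
 * read as a small truth table over the two pause bits in NVM word 0x0F,
 * assuming NVM_WORD0F_PAUSE_MASK covers the PAUSE and ASM_DIR bits:
 *
 *	PAUSE  ASM_DIR  ->  requested_mode
 *	  0       0         e1000_fc_none
 *	  0       1         e1000_fc_tx_pause
 *	  1       0         e1000_fc_full
 *	  1       1         e1000_fc_full
 */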
639 * e1000e_setup_link - Setup flow control and link settings
640 * @hw: pointer to the HW structure
642 * Determines which flow control settings to use, then configures flow
643 * control. Calls the appropriate media-specific link configuration
644 * function. Assuming the adapter has a valid link partner, a valid link
645 * should be established. Assumes the hardware has previously been reset
646 * and the transmitter and receiver are not enabled.
648 s32 e1000e_setup_link(struct e1000_hw *hw)
650 struct e1000_mac_info *mac = &hw->mac;
651 s32 ret_val;
654 * In the case of the phy reset being blocked, we already have a link.
655 * We do not need to set it up again.
657 if (e1000_check_reset_block(hw))
658 return 0;
661 * If requested flow control is set to default, set flow control
662 * based on the EEPROM flow control settings.
664 if (hw->fc.requested_mode == e1000_fc_default) {
665 ret_val = e1000_set_default_fc_generic(hw);
666 if (ret_val)
667 return ret_val;
671 * Save off the requested flow control mode for use later. Depending
672 * on the link partner's capabilities, we may or may not use this mode.
674 hw->fc.current_mode = hw->fc.requested_mode;
676 hw_dbg(hw, "After fix-ups FlowControl is now = %x\n",
677 hw->fc.current_mode);
679 /* Call the necessary media_type subroutine to configure the link. */
680 ret_val = mac->ops.setup_physical_interface(hw);
681 if (ret_val)
682 return ret_val;
685 * Initialize the flow control address, type, and PAUSE timer
686 * registers to their default values. This is done even if flow
687 * control is disabled, because it does not hurt anything to
688 * initialize these registers.
690 hw_dbg(hw, "Initializing the Flow Control address, type and timer regs\n");
691 ew32(FCT, FLOW_CONTROL_TYPE);
692 ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
693 ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
695 ew32(FCTTV, hw->fc.pause_time);
697 return e1000e_set_fc_watermarks(hw);
701 * e1000_commit_fc_settings_generic - Configure flow control
702 * @hw: pointer to the HW structure
704 * Write the flow control settings to the Transmit Config Word Register (TXCW)
705  * based on the flow control settings in e1000_mac_info.
707 static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
709 struct e1000_mac_info *mac = &hw->mac;
710 u32 txcw;
713 * Check for a software override of the flow control settings, and
714 * setup the device accordingly. If auto-negotiation is enabled, then
715 * software will have to set the "PAUSE" bits to the correct value in
716 * the Transmit Config Word Register (TXCW) and re-start auto-
717 * negotiation. However, if auto-negotiation is disabled, then
718 * software will have to manually configure the two flow control enable
719 * bits in the CTRL register.
721 * The possible values of the "fc" parameter are:
722 * 0: Flow control is completely disabled
723 * 1: Rx flow control is enabled (we can receive pause frames,
724 * but not send pause frames).
725 * 2: Tx flow control is enabled (we can send pause frames but we
726 * do not support receiving pause frames).
727 * 3: Both Rx and Tx flow control (symmetric) are enabled.
729 switch (hw->fc.current_mode) {
730 case e1000_fc_none:
731 /* Flow control completely disabled by a software over-ride. */
732 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
733 break;
734 case e1000_fc_rx_pause:
736 * Rx Flow control is enabled and Tx Flow control is disabled
737 * by a software over-ride. Since there really isn't a way to
738 * advertise that we are capable of Rx Pause ONLY, we will
739 * advertise that we support both symmetric and asymmetric Rx
740 * PAUSE. Later, we will disable the adapter's ability to send
741 * PAUSE frames.
743 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
744 break;
745 case e1000_fc_tx_pause:
747 * Tx Flow control is enabled, and Rx Flow control is disabled,
748 * by a software over-ride.
750 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
751 break;
752 case e1000_fc_full:
754 * Flow control (both Rx and Tx) is enabled by a software
755 * over-ride.
757 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
758 break;
759 default:
760 hw_dbg(hw, "Flow control param set incorrectly\n");
761 return -E1000_ERR_CONFIG;
762 break;
765 ew32(TXCW, txcw);
766 mac->txcw = txcw;
768 return 0;
772 * e1000_poll_fiber_serdes_link_generic - Poll for link up
773 * @hw: pointer to the HW structure
775  * Polls for link up by reading the status register; if link fails to come
776  * up with auto-negotiation, then the link is forced if a signal is detected.
778 static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
780 struct e1000_mac_info *mac = &hw->mac;
781 u32 i, status;
782 s32 ret_val;
785 * If we have a signal (the cable is plugged in, or assumed true for
786 * serdes media) then poll for a "Link-Up" indication in the Device
787 * Status Register. Time-out if a link isn't seen in 500 milliseconds
788  * (Auto-negotiation should complete in less than 500
789 * milliseconds even if the other end is doing it in SW).
791 for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
792 msleep(10);
793 status = er32(STATUS);
794 if (status & E1000_STATUS_LU)
795 break;
797 if (i == FIBER_LINK_UP_LIMIT) {
798 hw_dbg(hw, "Never got a valid link from auto-neg!!!\n");
799 mac->autoneg_failed = 1;
801 * AutoNeg failed to achieve a link, so we'll call
802 * mac->check_for_link. This routine will force the
803 * link up if we detect a signal. This will allow us to
804 * communicate with non-autonegotiating link partners.
806 ret_val = mac->ops.check_for_link(hw);
807 if (ret_val) {
808 hw_dbg(hw, "Error while checking for link\n");
809 return ret_val;
811 mac->autoneg_failed = 0;
812 } else {
813 mac->autoneg_failed = 0;
814 hw_dbg(hw, "Valid Link Found\n");
817 return 0;
821 * e1000e_setup_fiber_serdes_link - Setup link for fiber/serdes
822 * @hw: pointer to the HW structure
824 * Configures collision distance and flow control for fiber and serdes
825 * links. Upon successful setup, poll for link.
827 s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
829 u32 ctrl;
830 s32 ret_val;
832 ctrl = er32(CTRL);
834 /* Take the link out of reset */
835 ctrl &= ~E1000_CTRL_LRST;
837 e1000e_config_collision_dist(hw);
839 ret_val = e1000_commit_fc_settings_generic(hw);
840 if (ret_val)
841 return ret_val;
844 * Since auto-negotiation is enabled, take the link out of reset (the
845 * link will be in reset, because we previously reset the chip). This
846 * will restart auto-negotiation. If auto-negotiation is successful
847 * then the link-up status bit will be set and the flow control enable
848 * bits (RFCE and TFCE) will be set according to their negotiated value.
850 hw_dbg(hw, "Auto-negotiation enabled\n");
852 ew32(CTRL, ctrl);
853 e1e_flush();
854 msleep(1);
857 * For these adapters, the SW definable pin 1 is set when the optics
858 * detect a signal. If we have a signal, then poll for a "Link-Up"
859 * indication.
861 if (hw->phy.media_type == e1000_media_type_internal_serdes ||
862 (er32(CTRL) & E1000_CTRL_SWDPIN1)) {
863 ret_val = e1000_poll_fiber_serdes_link_generic(hw);
864 } else {
865 hw_dbg(hw, "No signal detected\n");
868 return 0;
872 * e1000e_config_collision_dist - Configure collision distance
873 * @hw: pointer to the HW structure
875 * Configures the collision distance to the default value and is used
876 * during link setup. Currently no func pointer exists and all
877 * implementations are handled in the generic version of this function.
879 void e1000e_config_collision_dist(struct e1000_hw *hw)
881 u32 tctl;
883 tctl = er32(TCTL);
885 tctl &= ~E1000_TCTL_COLD;
886 tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
888 ew32(TCTL, tctl);
889 e1e_flush();
893 * e1000e_set_fc_watermarks - Set flow control high/low watermarks
894 * @hw: pointer to the HW structure
896 * Sets the flow control high/low threshold (watermark) registers. If
897 * flow control XON frame transmission is enabled, then set XON frame
898 * transmission as well.
900 s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
902 u32 fcrtl = 0, fcrth = 0;
905 * Set the flow control receive threshold registers. Normally,
906 * these registers will be set to a default threshold that may be
907 * adjusted later by the driver's runtime code. However, if the
908 * ability to transmit pause frames is not enabled, then these
909 * registers will be set to 0.
911 if (hw->fc.current_mode & e1000_fc_tx_pause) {
913 * We need to set up the Receive Threshold high and low water
914 * marks as well as (optionally) enabling the transmission of
915 * XON frames.
917 fcrtl = hw->fc.low_water;
918 fcrtl |= E1000_FCRTL_XONE;
919 fcrth = hw->fc.high_water;
921 ew32(FCRTL, fcrtl);
922 ew32(FCRTH, fcrth);
924 return 0;
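/*
 * Minimal usage sketch (added for illustration; the watermark values are
 * hypothetical - the driver normally derives them from the Rx FIFO size
 * before calling e1000e_set_fc_watermarks()):
 */
static s32 e1000_example_set_fc_watermarks(struct e1000_hw *hw)
{
	hw->fc.high_water = 0x5000;	/* XOFF sent once Rx FIFO use passes this */
	hw->fc.low_water = 0x4800;	/* XON sent (if enabled) once it drops below */

	return e1000e_set_fc_watermarks(hw);
}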
928 * e1000e_force_mac_fc - Force the MAC's flow control settings
929 * @hw: pointer to the HW structure
931 * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
932 * device control register to reflect the adapter settings. TFCE and RFCE
933 * need to be explicitly set by software when a copper PHY is used because
934 * autonegotiation is managed by the PHY rather than the MAC. Software must
935 * also configure these bits when link is forced on a fiber connection.
937 s32 e1000e_force_mac_fc(struct e1000_hw *hw)
939 u32 ctrl;
941 ctrl = er32(CTRL);
944 * Because we didn't get link via the internal auto-negotiation
945 * mechanism (we either forced link or we got link via PHY
946  * auto-neg), we have to manually enable/disable transmit and
947  * receive flow control.
949  * The "Case" statement below enables/disables flow control
950 * according to the "hw->fc.current_mode" parameter.
952 * The possible values of the "fc" parameter are:
953 * 0: Flow control is completely disabled
954 * 1: Rx flow control is enabled (we can receive pause
955 * frames but not send pause frames).
956 * 2: Tx flow control is enabled (we can send pause frames
957  *          but we do not receive pause frames).
958 * 3: Both Rx and Tx flow control (symmetric) is enabled.
959 * other: No other values should be possible at this point.
961 hw_dbg(hw, "hw->fc.current_mode = %u\n", hw->fc.current_mode);
963 switch (hw->fc.current_mode) {
964 case e1000_fc_none:
965 ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
966 break;
967 case e1000_fc_rx_pause:
968 ctrl &= (~E1000_CTRL_TFCE);
969 ctrl |= E1000_CTRL_RFCE;
970 break;
971 case e1000_fc_tx_pause:
972 ctrl &= (~E1000_CTRL_RFCE);
973 ctrl |= E1000_CTRL_TFCE;
974 break;
975 case e1000_fc_full:
976 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
977 break;
978 default:
979 hw_dbg(hw, "Flow control param set incorrectly\n");
980 return -E1000_ERR_CONFIG;
983 ew32(CTRL, ctrl);
985 return 0;
989 * e1000e_config_fc_after_link_up - Configures flow control after link
990 * @hw: pointer to the HW structure
992 * Checks the status of auto-negotiation after link up to ensure that the
993 * speed and duplex were not forced. If the link needed to be forced, then
994 * flow control needs to be forced also. If auto-negotiation is enabled
995 * and did not fail, then we configure flow control based on our link
996 * partner.
998 s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1000 struct e1000_mac_info *mac = &hw->mac;
1001 s32 ret_val = 0;
1002 u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
1003 u16 speed, duplex;
1006 * Check for the case where we have fiber media and auto-neg failed
1007 * so we had to force link. In this case, we need to force the
1008 * configuration of the MAC to match the "fc" parameter.
1010 if (mac->autoneg_failed) {
1011 if (hw->phy.media_type == e1000_media_type_fiber ||
1012 hw->phy.media_type == e1000_media_type_internal_serdes)
1013 ret_val = e1000e_force_mac_fc(hw);
1014 } else {
1015 if (hw->phy.media_type == e1000_media_type_copper)
1016 ret_val = e1000e_force_mac_fc(hw);
1019 if (ret_val) {
1020 hw_dbg(hw, "Error forcing flow control settings\n");
1021 return ret_val;
1025 * Check for the case where we have copper media and auto-neg is
1026 * enabled. In this case, we need to check and see if Auto-Neg
1027  * has completed, and if so, how the PHY and link partner have
1028 * flow control configured.
1030 if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
1032 * Read the MII Status Register and check to see if AutoNeg
1033 * has completed. We read this twice because this reg has
1034 * some "sticky" (latched) bits.
1036 ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
1037 if (ret_val)
1038 return ret_val;
1039 ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
1040 if (ret_val)
1041 return ret_val;
1043 if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
1044 hw_dbg(hw, "Copper PHY and Auto Neg "
1045 "has not completed.\n");
1046 return ret_val;
1050 * The AutoNeg process has completed, so we now need to
1051 * read both the Auto Negotiation Advertisement
1052 * Register (Address 4) and the Auto_Negotiation Base
1053 * Page Ability Register (Address 5) to determine how
1054 * flow control was negotiated.
1056 ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg);
1057 if (ret_val)
1058 return ret_val;
1059 ret_val = e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
1060 if (ret_val)
1061 return ret_val;
1064 * Two bits in the Auto Negotiation Advertisement Register
1065 * (Address 4) and two bits in the Auto Negotiation Base
1066 * Page Ability Register (Address 5) determine flow control
1067 * for both the PHY and the link partner. The following
1068 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
1069 * 1999, describes these PAUSE resolution bits and how flow
1070 * control is determined based upon these settings.
1071 * NOTE: DC = Don't Care
1073 * LOCAL DEVICE | LINK PARTNER
1074 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
1075 *-------|---------|-------|---------|--------------------
1076 * 0 | 0 | DC | DC | e1000_fc_none
1077 * 0 | 1 | 0 | DC | e1000_fc_none
1078 * 0 | 1 | 1 | 0 | e1000_fc_none
1079 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1080 * 1 | 0 | 0 | DC | e1000_fc_none
1081 * 1 | DC | 1 | DC | e1000_fc_full
1082 * 1 | 1 | 0 | 0 | e1000_fc_none
1083 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1086 * Are both PAUSE bits set to 1? If so, this implies
1087 * Symmetric Flow Control is enabled at both ends. The
1088 * ASM_DIR bits are irrelevant per the spec.
1090 * For Symmetric Flow Control:
1092 * LOCAL DEVICE | LINK PARTNER
1093 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1094 *-------|---------|-------|---------|--------------------
1095 * 1 | DC | 1 | DC | E1000_fc_full
1098 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1099 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
1101  * Now we need to check if the user selected Rx ONLY
1102  * pause frames. In this case, we had to advertise
1103 * FULL flow control because we could not advertise Rx
1104 * ONLY. Hence, we must now check to see if we need to
1105 * turn OFF the TRANSMISSION of PAUSE frames.
1107 if (hw->fc.requested_mode == e1000_fc_full) {
1108 hw->fc.current_mode = e1000_fc_full;
1109 hw_dbg(hw, "Flow Control = FULL.\r\n");
1110 } else {
1111 hw->fc.current_mode = e1000_fc_rx_pause;
1112 hw_dbg(hw, "Flow Control = "
1113 "RX PAUSE frames only.\r\n");
1117 * For receiving PAUSE frames ONLY.
1119 * LOCAL DEVICE | LINK PARTNER
1120 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1121 *-------|---------|-------|---------|--------------------
1122 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1125 else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1126 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
1127 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
1128 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
1129 hw->fc.current_mode = e1000_fc_tx_pause;
1130 hw_dbg(hw, "Flow Control = Tx PAUSE frames only.\r\n");
1133 * For transmitting PAUSE frames ONLY.
1135 * LOCAL DEVICE | LINK PARTNER
1136 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1137 *-------|---------|-------|---------|--------------------
1138 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1141 else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1142 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
1143 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
1144 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
1145 hw->fc.current_mode = e1000_fc_rx_pause;
1146 hw_dbg(hw, "Flow Control = Rx PAUSE frames only.\r\n");
1147 } else {
1149 * Per the IEEE spec, at this point flow control
1150 * should be disabled.
1152 hw->fc.current_mode = e1000_fc_none;
1153 hw_dbg(hw, "Flow Control = NONE.\r\n");
1157 * Now we need to do one last check... If we auto-
1158 * negotiated to HALF DUPLEX, flow control should not be
1159 * enabled per IEEE 802.3 spec.
1161 ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
1162 if (ret_val) {
1163 hw_dbg(hw, "Error getting link speed and duplex\n");
1164 return ret_val;
1167 if (duplex == HALF_DUPLEX)
1168 hw->fc.current_mode = e1000_fc_none;
1171 * Now we call a subroutine to actually force the MAC
1172 * controller to use the correct flow control settings.
1174 ret_val = e1000e_force_mac_fc(hw);
1175 if (ret_val) {
1176 hw_dbg(hw, "Error forcing flow control settings\n");
1177 return ret_val;
1181 return 0;
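/*
 * Minimal sketch (added for illustration, not part of the original driver):
 * the pause-resolution table above expressed as a pure helper.  Given the
 * local advertisement word and the link partner ability word, it returns
 * the mode the code above settles on when the user requested full flow
 * control.
 */
static int e1000_example_resolve_fc(u16 mii_nway_adv, u16 mii_nway_lp)
{
	if ((mii_nway_adv & NWAY_AR_PAUSE) && (mii_nway_lp & NWAY_LPAR_PAUSE))
		return e1000_fc_full;

	if (!(mii_nway_adv & NWAY_AR_PAUSE) &&
	    (mii_nway_adv & NWAY_AR_ASM_DIR) &&
	    (mii_nway_lp & NWAY_LPAR_PAUSE) &&
	    (mii_nway_lp & NWAY_LPAR_ASM_DIR))
		return e1000_fc_tx_pause;

	if ((mii_nway_adv & NWAY_AR_PAUSE) &&
	    (mii_nway_adv & NWAY_AR_ASM_DIR) &&
	    !(mii_nway_lp & NWAY_LPAR_PAUSE) &&
	    (mii_nway_lp & NWAY_LPAR_ASM_DIR))
		return e1000_fc_rx_pause;

	return e1000_fc_none;
}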
1185 * e1000e_get_speed_and_duplex_copper - Retrieve current speed/duplex
1186 * @hw: pointer to the HW structure
1187 * @speed: stores the current speed
1188 * @duplex: stores the current duplex
1190 * Read the status register for the current speed/duplex and store the current
1191 * speed and duplex for copper connections.
1193 s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex)
1195 u32 status;
1197 status = er32(STATUS);
1198 if (status & E1000_STATUS_SPEED_1000) {
1199 *speed = SPEED_1000;
1200 hw_dbg(hw, "1000 Mbs, ");
1201 } else if (status & E1000_STATUS_SPEED_100) {
1202 *speed = SPEED_100;
1203                 hw_dbg(hw, "100 Mb/s, ");
1204 } else {
1205 *speed = SPEED_10;
1206                 hw_dbg(hw, "10 Mb/s, ");
1209 if (status & E1000_STATUS_FD) {
1210 *duplex = FULL_DUPLEX;
1211 hw_dbg(hw, "Full Duplex\n");
1212 } else {
1213 *duplex = HALF_DUPLEX;
1214 hw_dbg(hw, "Half Duplex\n");
1217 return 0;
1221 * e1000e_get_speed_and_duplex_fiber_serdes - Retrieve current speed/duplex
1222 * @hw: pointer to the HW structure
1223 * @speed: stores the current speed
1224 * @duplex: stores the current duplex
1226 * Sets the speed and duplex to gigabit full duplex (the only possible option)
1227 * for fiber/serdes links.
1229 s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex)
1231 *speed = SPEED_1000;
1232 *duplex = FULL_DUPLEX;
1234 return 0;
1238 * e1000e_get_hw_semaphore - Acquire hardware semaphore
1239 * @hw: pointer to the HW structure
1241 * Acquire the HW semaphore to access the PHY or NVM
1243 s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
1245 u32 swsm;
1246 s32 timeout = hw->nvm.word_size + 1;
1247 s32 i = 0;
1249 /* Get the SW semaphore */
1250 while (i < timeout) {
1251 swsm = er32(SWSM);
1252 if (!(swsm & E1000_SWSM_SMBI))
1253 break;
1255 udelay(50);
1256 i++;
1259 if (i == timeout) {
1260 hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n");
1261 return -E1000_ERR_NVM;
1264 /* Get the FW semaphore. */
1265 for (i = 0; i < timeout; i++) {
1266 swsm = er32(SWSM);
1267 ew32(SWSM, swsm | E1000_SWSM_SWESMBI);
1269 /* Semaphore acquired if bit latched */
1270 if (er32(SWSM) & E1000_SWSM_SWESMBI)
1271 break;
1273 udelay(50);
1276 if (i == timeout) {
1277 /* Release semaphores */
1278 e1000e_put_hw_semaphore(hw);
1279 hw_dbg(hw, "Driver can't access the NVM\n");
1280 return -E1000_ERR_NVM;
1283 return 0;
1287 * e1000e_put_hw_semaphore - Release hardware semaphore
1288 * @hw: pointer to the HW structure
1290 * Release hardware semaphore used to access the PHY or NVM
1292 void e1000e_put_hw_semaphore(struct e1000_hw *hw)
1294 u32 swsm;
1296 swsm = er32(SWSM);
1297 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
1298 ew32(SWSM, swsm);
1302 * e1000e_get_auto_rd_done - Check for auto read completion
1303 * @hw: pointer to the HW structure
1305 * Check EEPROM for Auto Read done bit.
1307 s32 e1000e_get_auto_rd_done(struct e1000_hw *hw)
1309 s32 i = 0;
1311 while (i < AUTO_READ_DONE_TIMEOUT) {
1312 if (er32(EECD) & E1000_EECD_AUTO_RD)
1313 break;
1314 msleep(1);
1315 i++;
1318 if (i == AUTO_READ_DONE_TIMEOUT) {
1319 hw_dbg(hw, "Auto read by HW from NVM has not completed.\n");
1320 return -E1000_ERR_RESET;
1323 return 0;
1327 * e1000e_valid_led_default - Verify a valid default LED config
1328 * @hw: pointer to the HW structure
1329 * @data: pointer to the NVM (EEPROM)
1331 * Read the EEPROM for the current default LED configuration. If the
1332 * LED configuration is not valid, set to a valid LED configuration.
1334 s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data)
1336 s32 ret_val;
1338 ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
1339 if (ret_val) {
1340 hw_dbg(hw, "NVM Read Error\n");
1341 return ret_val;
1344 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
1345 *data = ID_LED_DEFAULT;
1347 return 0;
1351  * e1000e_id_led_init - Initialize ID LED settings
1352 * @hw: pointer to the HW structure
1355 s32 e1000e_id_led_init(struct e1000_hw *hw)
1357 struct e1000_mac_info *mac = &hw->mac;
1358 s32 ret_val;
1359 const u32 ledctl_mask = 0x000000FF;
1360 const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
1361 const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
1362 u16 data, i, temp;
1363 const u16 led_mask = 0x0F;
1365 ret_val = hw->nvm.ops.valid_led_default(hw, &data);
1366 if (ret_val)
1367 return ret_val;
1369 mac->ledctl_default = er32(LEDCTL);
1370 mac->ledctl_mode1 = mac->ledctl_default;
1371 mac->ledctl_mode2 = mac->ledctl_default;
1373 for (i = 0; i < 4; i++) {
1374 temp = (data >> (i << 2)) & led_mask;
1375 switch (temp) {
1376 case ID_LED_ON1_DEF2:
1377 case ID_LED_ON1_ON2:
1378 case ID_LED_ON1_OFF2:
1379 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1380 mac->ledctl_mode1 |= ledctl_on << (i << 3);
1381 break;
1382 case ID_LED_OFF1_DEF2:
1383 case ID_LED_OFF1_ON2:
1384 case ID_LED_OFF1_OFF2:
1385 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1386 mac->ledctl_mode1 |= ledctl_off << (i << 3);
1387 break;
1388 default:
1389 /* Do nothing */
1390 break;
1392 switch (temp) {
1393 case ID_LED_DEF1_ON2:
1394 case ID_LED_ON1_ON2:
1395 case ID_LED_OFF1_ON2:
1396 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1397 mac->ledctl_mode2 |= ledctl_on << (i << 3);
1398 break;
1399 case ID_LED_DEF1_OFF2:
1400 case ID_LED_ON1_OFF2:
1401 case ID_LED_OFF1_OFF2:
1402 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1403 mac->ledctl_mode2 |= ledctl_off << (i << 3);
1404 break;
1405 default:
1406 /* Do nothing */
1407 break;
1411 return 0;
1415 * e1000e_setup_led_generic - Configures SW controllable LED
1416 * @hw: pointer to the HW structure
1418 * This prepares the SW controllable LED for use and saves the current state
1419 * of the LED so it can be later restored.
1421 s32 e1000e_setup_led_generic(struct e1000_hw *hw)
1423 u32 ledctl;
1425 if (hw->mac.ops.setup_led != e1000e_setup_led_generic) {
1426 return -E1000_ERR_CONFIG;
1429 if (hw->phy.media_type == e1000_media_type_fiber) {
1430 ledctl = er32(LEDCTL);
1431 hw->mac.ledctl_default = ledctl;
1432 /* Turn off LED0 */
1433 ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
1434 E1000_LEDCTL_LED0_BLINK |
1435 E1000_LEDCTL_LED0_MODE_MASK);
1436 ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
1437 E1000_LEDCTL_LED0_MODE_SHIFT);
1438 ew32(LEDCTL, ledctl);
1439 } else if (hw->phy.media_type == e1000_media_type_copper) {
1440 ew32(LEDCTL, hw->mac.ledctl_mode1);
1443 return 0;
1447 * e1000e_cleanup_led_generic - Set LED config to default operation
1448 * @hw: pointer to the HW structure
1450 * Remove the current LED configuration and set the LED configuration
1451 * to the default value, saved from the EEPROM.
1453 s32 e1000e_cleanup_led_generic(struct e1000_hw *hw)
1455 ew32(LEDCTL, hw->mac.ledctl_default);
1456 return 0;
1460 * e1000e_blink_led - Blink LED
1461 * @hw: pointer to the HW structure
1463 * Blink the LEDs which are set to be on.
1465 s32 e1000e_blink_led(struct e1000_hw *hw)
1467 u32 ledctl_blink = 0;
1468 u32 i;
1470 if (hw->phy.media_type == e1000_media_type_fiber) {
1471 /* always blink LED0 for PCI-E fiber */
1472 ledctl_blink = E1000_LEDCTL_LED0_BLINK |
1473 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
1474 } else {
1476 * set the blink bit for each LED that's "on" (0x0E)
1477 * in ledctl_mode2
1479 ledctl_blink = hw->mac.ledctl_mode2;
1480 for (i = 0; i < 4; i++)
1481 if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
1482 E1000_LEDCTL_MODE_LED_ON)
1483 ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
1484 (i * 8));
1487 ew32(LEDCTL, ledctl_blink);
1489 return 0;
1493 * e1000e_led_on_generic - Turn LED on
1494 * @hw: pointer to the HW structure
1496 * Turn LED on.
1498 s32 e1000e_led_on_generic(struct e1000_hw *hw)
1500 u32 ctrl;
1502 switch (hw->phy.media_type) {
1503 case e1000_media_type_fiber:
1504 ctrl = er32(CTRL);
1505 ctrl &= ~E1000_CTRL_SWDPIN0;
1506 ctrl |= E1000_CTRL_SWDPIO0;
1507 ew32(CTRL, ctrl);
1508 break;
1509 case e1000_media_type_copper:
1510 ew32(LEDCTL, hw->mac.ledctl_mode2);
1511 break;
1512 default:
1513 break;
1516 return 0;
1520 * e1000e_led_off_generic - Turn LED off
1521 * @hw: pointer to the HW structure
1523 * Turn LED off.
1525 s32 e1000e_led_off_generic(struct e1000_hw *hw)
1527 u32 ctrl;
1529 switch (hw->phy.media_type) {
1530 case e1000_media_type_fiber:
1531 ctrl = er32(CTRL);
1532 ctrl |= E1000_CTRL_SWDPIN0;
1533 ctrl |= E1000_CTRL_SWDPIO0;
1534 ew32(CTRL, ctrl);
1535 break;
1536 case e1000_media_type_copper:
1537 ew32(LEDCTL, hw->mac.ledctl_mode1);
1538 break;
1539 default:
1540 break;
1543 return 0;
1547 * e1000e_set_pcie_no_snoop - Set PCI-express capabilities
1548 * @hw: pointer to the HW structure
1549 * @no_snoop: bitmap of snoop events
1551 * Set the PCI-express register to snoop for events enabled in 'no_snoop'.
1553 void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop)
1555 u32 gcr;
1557 if (no_snoop) {
1558 gcr = er32(GCR);
1559 gcr &= ~(PCIE_NO_SNOOP_ALL);
1560 gcr |= no_snoop;
1561 ew32(GCR, gcr);
1566 * e1000e_disable_pcie_master - Disables PCI-express master access
1567 * @hw: pointer to the HW structure
1569 * Returns 0 if successful, else returns -10
1570 * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
1571 * the master requests to be disabled.
1573 * Disables PCI-Express master access and verifies there are no pending
1574 * requests.
1576 s32 e1000e_disable_pcie_master(struct e1000_hw *hw)
1578 u32 ctrl;
1579 s32 timeout = MASTER_DISABLE_TIMEOUT;
1581 ctrl = er32(CTRL);
1582 ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
1583 ew32(CTRL, ctrl);
1585 while (timeout) {
1586 if (!(er32(STATUS) &
1587 E1000_STATUS_GIO_MASTER_ENABLE))
1588 break;
1589 udelay(100);
1590 timeout--;
1593 if (!timeout) {
1594 hw_dbg(hw, "Master requests are pending.\n");
1595 return -E1000_ERR_MASTER_REQUESTS_PENDING;
1598 return 0;
1602 * e1000e_reset_adaptive - Reset Adaptive Interframe Spacing
1603 * @hw: pointer to the HW structure
1605 * Reset the Adaptive Interframe Spacing throttle to default values.
1607 void e1000e_reset_adaptive(struct e1000_hw *hw)
1609 struct e1000_mac_info *mac = &hw->mac;
1611 mac->current_ifs_val = 0;
1612 mac->ifs_min_val = IFS_MIN;
1613 mac->ifs_max_val = IFS_MAX;
1614 mac->ifs_step_size = IFS_STEP;
1615 mac->ifs_ratio = IFS_RATIO;
1617 mac->in_ifs_mode = 0;
1618 ew32(AIT, 0);
1622 * e1000e_update_adaptive - Update Adaptive Interframe Spacing
1623 * @hw: pointer to the HW structure
1625 * Update the Adaptive Interframe Spacing Throttle value based on the
1626 * time between transmitted packets and time between collisions.
1628 void e1000e_update_adaptive(struct e1000_hw *hw)
1630 struct e1000_mac_info *mac = &hw->mac;
1632 if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
1633 if (mac->tx_packet_delta > MIN_NUM_XMITS) {
1634 mac->in_ifs_mode = 1;
1635 if (mac->current_ifs_val < mac->ifs_max_val) {
1636 if (!mac->current_ifs_val)
1637 mac->current_ifs_val = mac->ifs_min_val;
1638 else
1639 mac->current_ifs_val +=
1640 mac->ifs_step_size;
1641 ew32(AIT, mac->current_ifs_val);
1644 } else {
1645 if (mac->in_ifs_mode &&
1646 (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
1647 mac->current_ifs_val = 0;
1648 mac->in_ifs_mode = 0;
1649 ew32(AIT, 0);
1655 * e1000_raise_eec_clk - Raise EEPROM clock
1656 * @hw: pointer to the HW structure
1657 * @eecd: pointer to the EEPROM
1659 * Enable/Raise the EEPROM clock bit.
1661 static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
1663 *eecd = *eecd | E1000_EECD_SK;
1664 ew32(EECD, *eecd);
1665 e1e_flush();
1666 udelay(hw->nvm.delay_usec);
1670 * e1000_lower_eec_clk - Lower EEPROM clock
1671 * @hw: pointer to the HW structure
1672 * @eecd: pointer to the EEPROM
1674 * Clear/Lower the EEPROM clock bit.
1676 static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
1678 *eecd = *eecd & ~E1000_EECD_SK;
1679 ew32(EECD, *eecd);
1680 e1e_flush();
1681 udelay(hw->nvm.delay_usec);
1685  * e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
1686 * @hw: pointer to the HW structure
1687 * @data: data to send to the EEPROM
1688 * @count: number of bits to shift out
1690 * We need to shift 'count' bits out to the EEPROM. So, the value in the
1691 * "data" parameter will be shifted out to the EEPROM one bit at a time.
1692 * In order to do this, "data" must be broken down into bits.
1694 static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
1696 struct e1000_nvm_info *nvm = &hw->nvm;
1697 u32 eecd = er32(EECD);
1698 u32 mask;
1700 mask = 0x01 << (count - 1);
1701 if (nvm->type == e1000_nvm_eeprom_spi)
1702 eecd |= E1000_EECD_DO;
1704 do {
1705 eecd &= ~E1000_EECD_DI;
1707 if (data & mask)
1708 eecd |= E1000_EECD_DI;
1710 ew32(EECD, eecd);
1711 e1e_flush();
1713 udelay(nvm->delay_usec);
1715 e1000_raise_eec_clk(hw, &eecd);
1716 e1000_lower_eec_clk(hw, &eecd);
1718 mask >>= 1;
1719 } while (mask);
1721 eecd &= ~E1000_EECD_DI;
1722 ew32(EECD, eecd);
1726 * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
1727 * @hw: pointer to the HW structure
1728 * @count: number of bits to shift in
1730 * In order to read a register from the EEPROM, we need to shift 'count' bits
1731 * in from the EEPROM. Bits are "shifted in" by raising the clock input to
1732 * the EEPROM (setting the SK bit), and then reading the value of the data out
1733 * "DO" bit. During this "shifting in" process the data in "DI" bit should
1734 * always be clear.
1736 static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
1738 u32 eecd;
1739 u32 i;
1740 u16 data;
1742 eecd = er32(EECD);
1744 eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
1745 data = 0;
1747 for (i = 0; i < count; i++) {
1748 data <<= 1;
1749 e1000_raise_eec_clk(hw, &eecd);
1751 eecd = er32(EECD);
1753 eecd &= ~E1000_EECD_DI;
1754 if (eecd & E1000_EECD_DO)
1755 data |= 1;
1757 e1000_lower_eec_clk(hw, &eecd);
1760 return data;
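/*
 * Note (added for illustration): both shift routines work MSB-first.
 * Shifting out an 8-bit opcode starts with mask = 0x01 << 7 = 0x80, so the
 * most significant bit is driven onto DI first, with one SK pulse per bit;
 * shifting in mirrors this by left-shifting the accumulator before sampling
 * DO, so the first bit read ends up as the MSB of the result.
 */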
1764 * e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion
1765 * @hw: pointer to the HW structure
1766 * @ee_reg: EEPROM flag for polling
1768 * Polls the EEPROM status bit for either read or write completion based
1769 * upon the value of 'ee_reg'.
1771 s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
1773 u32 attempts = 100000;
1774 u32 i, reg = 0;
1776 for (i = 0; i < attempts; i++) {
1777 if (ee_reg == E1000_NVM_POLL_READ)
1778 reg = er32(EERD);
1779 else
1780 reg = er32(EEWR);
1782 if (reg & E1000_NVM_RW_REG_DONE)
1783 return 0;
1785 udelay(5);
1788 return -E1000_ERR_NVM;
1792 * e1000e_acquire_nvm - Generic request for access to EEPROM
1793 * @hw: pointer to the HW structure
1795 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
1796 * Return successful if access grant bit set, else clear the request for
1797 * EEPROM access and return -E1000_ERR_NVM (-1).
1799 s32 e1000e_acquire_nvm(struct e1000_hw *hw)
1801 u32 eecd = er32(EECD);
1802 s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
1804 ew32(EECD, eecd | E1000_EECD_REQ);
1805 eecd = er32(EECD);
1807 while (timeout) {
1808 if (eecd & E1000_EECD_GNT)
1809 break;
1810 udelay(5);
1811 eecd = er32(EECD);
1812 timeout--;
1815 if (!timeout) {
1816 eecd &= ~E1000_EECD_REQ;
1817 ew32(EECD, eecd);
1818 hw_dbg(hw, "Could not acquire NVM grant\n");
1819 return -E1000_ERR_NVM;
1822 return 0;
1826 * e1000_standby_nvm - Return EEPROM to standby state
1827 * @hw: pointer to the HW structure
1829 * Return the EEPROM to a standby state.
1831 static void e1000_standby_nvm(struct e1000_hw *hw)
1833 struct e1000_nvm_info *nvm = &hw->nvm;
1834 u32 eecd = er32(EECD);
1836 if (nvm->type == e1000_nvm_eeprom_spi) {
1837 /* Toggle CS to flush commands */
1838 eecd |= E1000_EECD_CS;
1839 ew32(EECD, eecd);
1840 e1e_flush();
1841 udelay(nvm->delay_usec);
1842 eecd &= ~E1000_EECD_CS;
1843 ew32(EECD, eecd);
1844 e1e_flush();
1845 udelay(nvm->delay_usec);
1850 * e1000_stop_nvm - Terminate EEPROM command
1851 * @hw: pointer to the HW structure
1853 * Terminates the current command by inverting the EEPROM's chip select pin.
1855 static void e1000_stop_nvm(struct e1000_hw *hw)
1857 u32 eecd;
1859 eecd = er32(EECD);
1860 if (hw->nvm.type == e1000_nvm_eeprom_spi) {
1861 /* Pull CS high */
1862 eecd |= E1000_EECD_CS;
1863 e1000_lower_eec_clk(hw, &eecd);
1868 * e1000e_release_nvm - Release exclusive access to EEPROM
1869 * @hw: pointer to the HW structure
1871 * Stop any current commands to the EEPROM and clear the EEPROM request bit.
1873 void e1000e_release_nvm(struct e1000_hw *hw)
1875 u32 eecd;
1877 e1000_stop_nvm(hw);
1879 eecd = er32(EECD);
1880 eecd &= ~E1000_EECD_REQ;
1881 ew32(EECD, eecd);
1885 * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
1886 * @hw: pointer to the HW structure
1888  * Sets up the EEPROM for reading and writing.
1890 static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
1892 struct e1000_nvm_info *nvm = &hw->nvm;
1893 u32 eecd = er32(EECD);
1894 u16 timeout = 0;
1895 u8 spi_stat_reg;
1897 if (nvm->type == e1000_nvm_eeprom_spi) {
1898 /* Clear SK and CS */
1899 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
1900 ew32(EECD, eecd);
1901 udelay(1);
1902 timeout = NVM_MAX_RETRY_SPI;
1905 * Read "Status Register" repeatedly until the LSB is cleared.
1906 * The EEPROM will signal that the command has been completed
1907 * by clearing bit 0 of the internal status register. If it's
1908 * not cleared within 'timeout', then error out.
1910 while (timeout) {
1911 e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
1912 hw->nvm.opcode_bits);
1913 spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
1914 if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
1915 break;
1917 udelay(5);
1918 e1000_standby_nvm(hw);
1919 timeout--;
1922 if (!timeout) {
1923 hw_dbg(hw, "SPI NVM Status error\n");
1924 return -E1000_ERR_NVM;
1928 return 0;
1932 * e1000e_read_nvm_eerd - Reads EEPROM using EERD register
1933 * @hw: pointer to the HW structure
1934 * @offset: offset of word in the EEPROM to read
1935 * @words: number of words to read
1936 * @data: word read from the EEPROM
1938 * Reads a 16 bit word from the EEPROM using the EERD register.
1940 s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
1942 struct e1000_nvm_info *nvm = &hw->nvm;
1943 u32 i, eerd = 0;
1944 s32 ret_val = 0;
1947 * A check for invalid values: offset too large, too many words,
1948 * too many words for the offset, and not enough words.
1950 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
1951 (words == 0)) {
1952 hw_dbg(hw, "nvm parameter(s) out of bounds\n");
1953 return -E1000_ERR_NVM;
1956 for (i = 0; i < words; i++) {
1957 eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
1958 E1000_NVM_RW_REG_START;
1960 ew32(EERD, eerd);
1961 ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
1962 if (ret_val)
1963 break;
1965 data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA);
1968 return ret_val;
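/*
 * Worked example (illustration only, assuming the usual EERD layout with the
 * start bit in bit 0, the done bit polled above, the word address beginning
 * at E1000_NVM_RW_ADDR_SHIFT and the data returned in the upper half of the
 * register): with an address shift of 2, reading word 0x10 writes 0x41 to
 * EERD, and once the done bit latches the data is taken from bits 31:16.
 */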
1972 * e1000e_write_nvm_spi - Write to EEPROM using SPI
1973 * @hw: pointer to the HW structure
1974 * @offset: offset within the EEPROM to be written to
1975 * @words: number of words to write
1976 * @data: 16 bit word(s) to be written to the EEPROM
1978 * Writes data to EEPROM at offset using SPI interface.
1980  * If e1000e_update_nvm_checksum is not called after this function, the
1981 * EEPROM will most likely contain an invalid checksum.
1983 s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
1985 struct e1000_nvm_info *nvm = &hw->nvm;
1986 s32 ret_val;
1987 u16 widx = 0;
1990 * A check for invalid values: offset too large, too many words,
1991 * and not enough words.
1993 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
1994 (words == 0)) {
1995 hw_dbg(hw, "nvm parameter(s) out of bounds\n");
1996 return -E1000_ERR_NVM;
1999 ret_val = nvm->ops.acquire_nvm(hw);
2000 if (ret_val)
2001 return ret_val;
2003 msleep(10);
2005 while (widx < words) {
2006 u8 write_opcode = NVM_WRITE_OPCODE_SPI;
2008 ret_val = e1000_ready_nvm_eeprom(hw);
2009 if (ret_val) {
2010 nvm->ops.release_nvm(hw);
2011 return ret_val;
2014 e1000_standby_nvm(hw);
2016 /* Send the WRITE ENABLE command (8 bit opcode) */
2017 e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
2018 nvm->opcode_bits);
2020 e1000_standby_nvm(hw);
2023 * Some SPI eeproms use the 8th address bit embedded in the
2024 * opcode
2026 if ((nvm->address_bits == 8) && (offset >= 128))
2027 write_opcode |= NVM_A8_OPCODE_SPI;
2029 /* Send the Write command (8-bit opcode + addr) */
2030 e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
2031 e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
2032 nvm->address_bits);
2034 /* Loop to allow for up to whole page write of eeprom */
2035 while (widx < words) {
2036 u16 word_out = data[widx];
2037 word_out = (word_out >> 8) | (word_out << 8);
2038 e1000_shift_out_eec_bits(hw, word_out, 16);
2039 widx++;
2041 if ((((offset + widx) * 2) % nvm->page_size) == 0) {
2042 e1000_standby_nvm(hw);
2043 break;
2048 msleep(10);
2049 nvm->ops.release_nvm(hw);
2050 return 0;
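/*
 * Illustrative usage sketch (not part of the original driver): as the comment
 * above e1000e_write_nvm_spi notes, a write should normally be followed by a
 * checksum update so the EEPROM stays self-consistent.  Real callers go
 * through the hw->nvm.ops function pointers; the direct calls below are for
 * illustration only, and 'hw', the offset and the data word are assumed.
 *
 *      u16 word = 0x1234;
 *
 *      if (!e1000e_write_nvm_spi(hw, 0x20, 1, &word))
 *              e1000e_update_nvm_checksum_generic(hw);
 */
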
/**
 * e1000e_read_mac_addr - Read device MAC address
 * @hw: pointer to the HW structure
 *
 * Reads the device MAC address from the EEPROM and stores the value.
 * Since devices with two ports use the same EEPROM, we increment the
 * last bit in the MAC address for the second port.
 **/
s32 e1000e_read_mac_addr(struct e1000_hw *hw)
{
        s32 ret_val;
        u16 offset, nvm_data, i;
        u16 mac_addr_offset = 0;

        if (hw->mac.type == e1000_82571) {
                /* Check for an alternate MAC address.  An alternate MAC
                 * address can be set up by pre-boot software and must be
                 * treated like a permanent address that overrides the
                 * actual permanent MAC address. */
                ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
                                         &mac_addr_offset);
                if (ret_val) {
                        hw_dbg(hw, "NVM Read Error\n");
                        return ret_val;
                }
                if (mac_addr_offset == 0xFFFF)
                        mac_addr_offset = 0;

                if (mac_addr_offset) {
                        if (hw->bus.func == E1000_FUNC_1)
                                mac_addr_offset += ETH_ALEN/sizeof(u16);

                        /* make sure we have a valid mac address here
                         * before using it */
                        ret_val = e1000_read_nvm(hw, mac_addr_offset, 1,
                                                 &nvm_data);
                        if (ret_val) {
                                hw_dbg(hw, "NVM Read Error\n");
                                return ret_val;
                        }
                        if (nvm_data & 0x0001)
                                mac_addr_offset = 0;
                }

                if (mac_addr_offset)
                        hw->dev_spec.e82571.alt_mac_addr_is_present = 1;
        }

        for (i = 0; i < ETH_ALEN; i += 2) {
                offset = mac_addr_offset + (i >> 1);
                ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
                if (ret_val) {
                        hw_dbg(hw, "NVM Read Error\n");
                        return ret_val;
                }
                hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
                hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8);
        }

        /* Flip last bit of mac address if we're on second port */
        if (!mac_addr_offset && hw->bus.func == E1000_FUNC_1)
                hw->mac.perm_addr[5] ^= 1;

        for (i = 0; i < ETH_ALEN; i++)
                hw->mac.addr[i] = hw->mac.perm_addr[i];

        return 0;
}

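/*
 * Illustrative usage sketch (not part of the original driver): once
 * e1000e_read_mac_addr() succeeds, hw->mac.addr holds the (possibly
 * port-adjusted) address, which a hypothetical probe path could copy into
 * its net_device.  'netdev' is assumed here and is not defined in this file.
 *
 *      if (!e1000e_read_mac_addr(hw))
 *              memcpy(netdev->dev_addr, hw->mac.addr, ETH_ALEN);
 */
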
/**
 * e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
 **/
s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw)
{
        s32 ret_val;
        u16 checksum = 0;
        u16 i, nvm_data;

        for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
                ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
                if (ret_val) {
                        hw_dbg(hw, "NVM Read Error\n");
                        return ret_val;
                }
                checksum += nvm_data;
        }

        if (checksum != (u16) NVM_SUM) {
                hw_dbg(hw, "NVM Checksum Invalid\n");
                return -E1000_ERR_NVM;
        }

        return 0;
}

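/*
 * Worked example (illustration only): if the words at offsets
 * 0..NVM_CHECKSUM_REG-1 sum to 0xA0B0, the checksum word stored at
 * NVM_CHECKSUM_REG must be 0xBABA - 0xA0B0 = 0x1A0A so that the 16-bit sum
 * over words 0..NVM_CHECKSUM_REG equals NVM_SUM (0xBABA), which is exactly
 * the condition the loop above verifies.
 */
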
/**
 * e1000e_update_nvm_checksum_generic - Update EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
 * up to the checksum.  Then calculates the EEPROM checksum and writes the
 * value to the EEPROM.
 **/
s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw)
{
        s32 ret_val;
        u16 checksum = 0;
        u16 i, nvm_data;

        for (i = 0; i < NVM_CHECKSUM_REG; i++) {
                ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
                if (ret_val) {
                        hw_dbg(hw, "NVM Read Error while updating checksum.\n");
                        return ret_val;
                }
                checksum += nvm_data;
        }
        checksum = (u16) NVM_SUM - checksum;
        ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
        if (ret_val)
                hw_dbg(hw, "NVM Write Error while updating checksum.\n");

        return ret_val;
}

/**
 * e1000e_reload_nvm - Reloads EEPROM
 * @hw: pointer to the HW structure
 *
 * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
 * extended control register.
 **/
void e1000e_reload_nvm(struct e1000_hw *hw)
{
        u32 ctrl_ext;

        udelay(10);
        ctrl_ext = er32(CTRL_EXT);
        ctrl_ext |= E1000_CTRL_EXT_EE_RST;
        ew32(CTRL_EXT, ctrl_ext);
        e1e_flush();
}

/**
 * e1000_calculate_checksum - Calculate checksum for buffer
 * @buffer: pointer to EEPROM
 * @length: size of EEPROM to calculate a checksum for
 *
 * Calculates the checksum of a buffer over the specified length and
 * returns the calculated checksum.
 **/
static u8 e1000_calculate_checksum(u8 *buffer, u32 length)
{
        u32 i;
        u8 sum = 0;

        if (!buffer)
                return 0;

        for (i = 0; i < length; i++)
                sum += buffer[i];

        return (u8) (0 - sum);
}

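/*
 * Worked example (illustration only): for the three bytes {0x10, 0x20, 0x30}
 * the byte sum is 0x60, so e1000_calculate_checksum() returns
 * (u8)(0 - 0x60) = 0xA0.  Adding that checksum back to the byte sum gives
 * 0x100, i.e. 0 modulo 256, so a buffer followed by its checksum byte always
 * sums to zero.  The manageability code below uses this helper for its
 * cookie and command-header checksums.
 */
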
/**
 * e1000_mng_enable_host_if - Checks whether the host interface is enabled
 * @hw: pointer to the HW structure
 *
 * Returns 0 upon success, else -E1000_ERR_HOST_INTERFACE_COMMAND
 *
 * This function checks whether the HOST IF is enabled for command operation
 * and also checks whether the previous command is completed.  It busy-waits
 * if the previous command has not completed.
 **/
static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
{
        u32 hicr;
        u8 i;

        /* Check that the host interface is enabled. */
        hicr = er32(HICR);
        if ((hicr & E1000_HICR_EN) == 0) {
                hw_dbg(hw, "E1000_HOST_EN bit disabled.\n");
                return -E1000_ERR_HOST_INTERFACE_COMMAND;
        }
        /* check that the previous command has completed */
        for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
                hicr = er32(HICR);
                if (!(hicr & E1000_HICR_C))
                        break;
                mdelay(1);
        }

        if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
                hw_dbg(hw, "Previous command timeout failed.\n");
                return -E1000_ERR_HOST_INTERFACE_COMMAND;
        }

        return 0;
}

/**
 * e1000e_check_mng_mode_generic - check management mode
 * @hw: pointer to the HW structure
 *
 * Reads the firmware semaphore register and returns true (>0) if
 * manageability is enabled, else false (0).
 **/
bool e1000e_check_mng_mode_generic(struct e1000_hw *hw)
{
        u32 fwsm = er32(FWSM);

        return (fwsm & E1000_FWSM_MODE_MASK) ==
               (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
}

/**
 * e1000e_enable_tx_pkt_filtering - Enable packet filtering on Tx
 * @hw: pointer to the HW structure
 *
 * Enables packet filtering on transmit packets if manageability is enabled
 * and host interface is enabled.
 **/
bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
{
        struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
        u32 *buffer = (u32 *)&hw->mng_cookie;
        u32 offset;
        s32 ret_val, hdr_csum, csum;
        u8 i, len;

        /* No manageability, no filtering */
        if (!e1000e_check_mng_mode(hw)) {
                hw->mac.tx_pkt_filtering = 0;
                return 0;
        }

        /*
         * If we can't read from the host interface for whatever
         * reason, disable filtering.
         */
        ret_val = e1000_mng_enable_host_if(hw);
        if (ret_val != 0) {
                hw->mac.tx_pkt_filtering = 0;
                return ret_val;
        }

        /* Read in the header.  Length and offset are in dwords. */
        len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
        offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
        for (i = 0; i < len; i++)
                *(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset + i);
        hdr_csum = hdr->checksum;
        hdr->checksum = 0;
        csum = e1000_calculate_checksum((u8 *)hdr,
                                        E1000_MNG_DHCP_COOKIE_LENGTH);
        /*
         * If either the checksums or signature don't match, then
         * the cookie area isn't considered valid, in which case we
         * take the safe route of assuming Tx filtering is enabled.
         */
        if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
                hw->mac.tx_pkt_filtering = 1;
                return 1;
        }

        /* Cookie area is valid, make the final check for filtering. */
        if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) {
                hw->mac.tx_pkt_filtering = 0;
                return 0;
        }

        hw->mac.tx_pkt_filtering = 1;
        return 1;
}

/**
 * e1000_mng_write_cmd_header - Writes manageability command header
 * @hw: pointer to the HW structure
 * @hdr: pointer to the host interface command header
 *
 * Calculates the checksum for the command header and then writes the
 * header to the host interface.
 **/
static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
                                      struct e1000_host_mng_command_header *hdr)
{
        u16 i, length = sizeof(struct e1000_host_mng_command_header);

        /* Write the whole command header structure with new checksum. */

        hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);

        length >>= 2;
        /* Write the relevant command block into the ram area. */
        for (i = 0; i < length; i++) {
                E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i,
                                      *((u32 *) hdr + i));
                e1e_flush();
        }

        return 0;
}

/**
 * e1000_mng_host_if_write - Writes to the manageability host interface
 * @hw: pointer to the HW structure
 * @buffer: pointer to the host interface buffer
 * @length: size of the buffer
 * @offset: location in the buffer to write to
 * @sum: sum of the data (not checksum)
 *
 * Writes the buffer content at the given offset on the host interface.
 * Handles dword alignment so the writes are done efficiently, and
 * accumulates the sum of the written data in the *sum parameter.
 **/
static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer,
                                   u16 length, u16 offset, u8 *sum)
{
        u8 *tmp;
        u8 *bufptr = buffer;
        u32 data = 0;
        u16 remaining, i, j, prev_bytes;

        /* sum = only sum of the data and it is not checksum */

        if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH)
                return -E1000_ERR_PARAM;

        tmp = (u8 *)&data;
        prev_bytes = offset & 0x3;
        offset >>= 2;

        if (prev_bytes) {
                data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset);
                for (j = prev_bytes; j < sizeof(u32); j++) {
                        *(tmp + j) = *bufptr++;
                        *sum += *(tmp + j);
                }
                E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data);
                length -= j - prev_bytes;
                offset++;
        }

        remaining = length & 0x3;
        length -= remaining;

        /* Calculate length in DWORDs */
        length >>= 2;

        /*
         * The device driver writes the relevant command block into the
         * ram area.
         */
        for (i = 0; i < length; i++) {
                for (j = 0; j < sizeof(u32); j++) {
                        *(tmp + j) = *bufptr++;
                        *sum += *(tmp + j);
                }

                E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
        }
        if (remaining) {
                for (j = 0; j < sizeof(u32); j++) {
                        if (j < remaining)
                                *(tmp + j) = *bufptr++;
                        else
                                *(tmp + j) = 0;

                        *sum += *(tmp + j);
                }
                E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
        }

        return 0;
}

/**
 * e1000e_mng_write_dhcp_info - Writes DHCP info to host interface
 * @hw: pointer to the HW structure
 * @buffer: pointer to the host interface
 * @length: size of the buffer
 *
 * Writes the DHCP information to the host interface.
 **/
s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
{
        struct e1000_host_mng_command_header hdr;
        s32 ret_val;
        u32 hicr;

        hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
        hdr.command_length = length;
        hdr.reserved1 = 0;
        hdr.reserved2 = 0;
        hdr.checksum = 0;

        /* Enable the host interface */
        ret_val = e1000_mng_enable_host_if(hw);
        if (ret_val)
                return ret_val;

        /* Populate the host interface with the contents of "buffer". */
        ret_val = e1000_mng_host_if_write(hw, buffer, length,
                                          sizeof(hdr), &(hdr.checksum));
        if (ret_val)
                return ret_val;

        /* Write the manageability command header */
        ret_val = e1000_mng_write_cmd_header(hw, &hdr);
        if (ret_val)
                return ret_val;

        /* Tell the ARC a new command is pending. */
        hicr = er32(HICR);
        ew32(HICR, hicr | E1000_HICR_C);

        return 0;
}

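/*
 * Illustrative usage sketch (not part of the original driver): a caller with
 * a prepared DHCP payload in 'buf' of 'buf_len' bytes (both assumed here)
 * could hand it to the management firmware like this.
 *
 *      if (e1000e_mng_write_dhcp_info(hw, buf, buf_len))
 *              hw_dbg(hw, "Writing DHCP info to the host interface failed\n");
 */
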
/**
 * e1000e_enable_mng_pass_thru - Enable processing of ARPs
 * @hw: pointer to the HW structure
 *
 * Verifies whether the hardware needs to allow ARPs to be processed by the
 * host.
 **/
bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
{
        u32 manc;
        u32 fwsm, factps;
        bool ret_val = 0;

        manc = er32(MANC);

        if (!(manc & E1000_MANC_RCV_TCO_EN) ||
            !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
                return ret_val;

        if (hw->mac.arc_subsystem_valid) {
                fwsm = er32(FWSM);
                factps = er32(FACTPS);

                if (!(factps & E1000_FACTPS_MNGCG) &&
                    ((fwsm & E1000_FWSM_MODE_MASK) ==
                     (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
                        ret_val = 1;
                        return ret_val;
                }
        } else {
                if ((manc & E1000_MANC_SMBUS_EN) &&
                    !(manc & E1000_MANC_ASF_EN)) {
                        ret_val = 1;
                        return ret_val;
                }
        }

        return ret_val;
}

/**
 * e1000e_read_pba_num - Read device part number
 * @hw: pointer to the HW structure
 * @pba_num: pointer to device part number
 *
 * Reads the product board assembly (PBA) number from the two NVM words that
 * hold it and stores the combined 32-bit value in pba_num.
 **/
s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num)
{
        s32 ret_val;
        u16 nvm_data;

        ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
        if (ret_val) {
                hw_dbg(hw, "NVM Read Error\n");
                return ret_val;
        }
        *pba_num = (u32)(nvm_data << 16);

        ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
        if (ret_val) {
                hw_dbg(hw, "NVM Read Error\n");
                return ret_val;
        }
        *pba_num |= nvm_data;

        return 0;
}

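/*
 * Illustrative usage sketch (not part of the original driver): the 32-bit
 * part number read above is typically split back into two fields when it is
 * printed, e.g. from a hypothetical probe routine:
 *
 *      u32 pba;
 *
 *      if (!e1000e_read_pba_num(hw, &pba))
 *              printk(KERN_INFO "PBA No: %06x-%03x\n",
 *                     pba >> 8, pba & 0xff);
 */
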