ixgbe: remove unused functions
[linux-2.6.git] / drivers / net / ixgbe / ixgbe_common.c
blob 939e60f43fb96d30e13c75590609d657c87476ac
1 /*******************************************************************************
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *******************************************************************************/
28 #include <linux/pci.h>
29 #include <linux/delay.h>
30 #include <linux/sched.h>
31 #include <linux/netdevice.h>
33 #include "ixgbe.h"
34 #include "ixgbe_common.h"
35 #include "ixgbe_phy.h"
37 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
38 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
39 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
40 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
41 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
42 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
43 u16 count);
44 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
45 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
46 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
47 static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
48 static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw);
50 static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index);
51 static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index);
52 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
53 static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
54 static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
56 /**
57 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
58 * @hw: pointer to hardware structure
60 * Starts the hardware by filling the bus info structure and media type,
61 * clearing all on-chip counters, initializing the receive address registers,
62 * multicast table and VLAN filter table, calling the routine to set up link
63 * and flow control settings, and leaving the transmit and receive units disabled and uninitialized
64 **/
65 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
67 u32 ctrl_ext;
69 /* Set the media type */
70 hw->phy.media_type = hw->mac.ops.get_media_type(hw);
72 /* Identify the PHY */
73 hw->phy.ops.identify(hw);
75 /* Clear the VLAN filter table */
76 hw->mac.ops.clear_vfta(hw);
78 /* Clear statistics registers */
79 hw->mac.ops.clear_hw_cntrs(hw);
81 /* Set No Snoop Disable */
82 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
83 ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
84 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
85 IXGBE_WRITE_FLUSH(hw);
87 /* Setup flow control */
88 ixgbe_setup_fc(hw, 0);
90 /* Clear adapter stopped flag */
91 hw->adapter_stopped = false;
93 return 0;
96 /**
97 * ixgbe_init_hw_generic - Generic hardware initialization
98 * @hw: pointer to hardware structure
100 * Initializes the hardware by resetting it, filling the bus info structure
101 * and media type, clearing all on-chip counters, initializing the receive
102 * address registers, multicast table and VLAN filter table, calling the
103 * routine to set up link and flow control settings, and leaving the transmit
104 * and receive units disabled and uninitialized
106 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
108 s32 status;
110 /* Reset the hardware */
111 status = hw->mac.ops.reset_hw(hw);
113 if (status == 0) {
114 /* Start the HW */
115 status = hw->mac.ops.start_hw(hw);
118 return status;
122 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
123 * @hw: pointer to hardware structure
125 * Clears all hardware statistics counters by reading them from the hardware.
126 * Statistics counters are cleared on read.
128 s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
130 u16 i = 0;
132 IXGBE_READ_REG(hw, IXGBE_CRCERRS);
133 IXGBE_READ_REG(hw, IXGBE_ILLERRC);
134 IXGBE_READ_REG(hw, IXGBE_ERRBC);
135 IXGBE_READ_REG(hw, IXGBE_MSPDC);
136 for (i = 0; i < 8; i++)
137 IXGBE_READ_REG(hw, IXGBE_MPC(i));
139 IXGBE_READ_REG(hw, IXGBE_MLFC);
140 IXGBE_READ_REG(hw, IXGBE_MRFC);
141 IXGBE_READ_REG(hw, IXGBE_RLEC);
142 IXGBE_READ_REG(hw, IXGBE_LXONTXC);
143 IXGBE_READ_REG(hw, IXGBE_LXONRXC);
144 IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
145 IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
147 for (i = 0; i < 8; i++) {
148 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
149 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
150 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
151 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
154 IXGBE_READ_REG(hw, IXGBE_PRC64);
155 IXGBE_READ_REG(hw, IXGBE_PRC127);
156 IXGBE_READ_REG(hw, IXGBE_PRC255);
157 IXGBE_READ_REG(hw, IXGBE_PRC511);
158 IXGBE_READ_REG(hw, IXGBE_PRC1023);
159 IXGBE_READ_REG(hw, IXGBE_PRC1522);
160 IXGBE_READ_REG(hw, IXGBE_GPRC);
161 IXGBE_READ_REG(hw, IXGBE_BPRC);
162 IXGBE_READ_REG(hw, IXGBE_MPRC);
163 IXGBE_READ_REG(hw, IXGBE_GPTC);
164 IXGBE_READ_REG(hw, IXGBE_GORCL);
165 IXGBE_READ_REG(hw, IXGBE_GORCH);
166 IXGBE_READ_REG(hw, IXGBE_GOTCL);
167 IXGBE_READ_REG(hw, IXGBE_GOTCH);
168 for (i = 0; i < 8; i++)
169 IXGBE_READ_REG(hw, IXGBE_RNBC(i));
170 IXGBE_READ_REG(hw, IXGBE_RUC);
171 IXGBE_READ_REG(hw, IXGBE_RFC);
172 IXGBE_READ_REG(hw, IXGBE_ROC);
173 IXGBE_READ_REG(hw, IXGBE_RJC);
174 IXGBE_READ_REG(hw, IXGBE_MNGPRC);
175 IXGBE_READ_REG(hw, IXGBE_MNGPDC);
176 IXGBE_READ_REG(hw, IXGBE_MNGPTC);
177 IXGBE_READ_REG(hw, IXGBE_TORL);
178 IXGBE_READ_REG(hw, IXGBE_TORH);
179 IXGBE_READ_REG(hw, IXGBE_TPR);
180 IXGBE_READ_REG(hw, IXGBE_TPT);
181 IXGBE_READ_REG(hw, IXGBE_PTC64);
182 IXGBE_READ_REG(hw, IXGBE_PTC127);
183 IXGBE_READ_REG(hw, IXGBE_PTC255);
184 IXGBE_READ_REG(hw, IXGBE_PTC511);
185 IXGBE_READ_REG(hw, IXGBE_PTC1023);
186 IXGBE_READ_REG(hw, IXGBE_PTC1522);
187 IXGBE_READ_REG(hw, IXGBE_MPTC);
188 IXGBE_READ_REG(hw, IXGBE_BPTC);
189 for (i = 0; i < 16; i++) {
190 IXGBE_READ_REG(hw, IXGBE_QPRC(i));
191 IXGBE_READ_REG(hw, IXGBE_QBRC(i));
192 IXGBE_READ_REG(hw, IXGBE_QPTC(i));
193 IXGBE_READ_REG(hw, IXGBE_QBTC(i));
196 return 0;
200 * ixgbe_read_pba_num_generic - Reads part number from EEPROM
201 * @hw: pointer to hardware structure
202 * @pba_num: stores the part number from the EEPROM
204 * Reads the part number from the EEPROM.
206 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
208 s32 ret_val;
209 u16 data;
211 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
212 if (ret_val) {
213 hw_dbg(hw, "NVM Read Error\n");
214 return ret_val;
216 *pba_num = (u32)(data << 16);
218 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
219 if (ret_val) {
220 hw_dbg(hw, "NVM Read Error\n");
221 return ret_val;
223 *pba_num |= data;
225 return 0;
229 * ixgbe_get_mac_addr_generic - Generic get MAC address
230 * @hw: pointer to hardware structure
231 * @mac_addr: Adapter MAC address
233 * Reads the adapter's MAC address from the first Receive Address Register
234 * (RAR0). A reset of the adapter must be performed prior to calling this
235 * function so that the MAC address has been loaded from the EEPROM into RAR0.
237 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
239 u32 rar_high;
240 u32 rar_low;
241 u16 i;
243 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
244 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
246 for (i = 0; i < 4; i++)
247 mac_addr[i] = (u8)(rar_low >> (i*8));
249 for (i = 0; i < 2; i++)
250 mac_addr[i+4] = (u8)(rar_high >> (i*8));
252 return 0;
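/*
 * Illustration of the unpacking above (hypothetical values, not from this
 * source): for a station address of 00:1b:21:12:34:56 the hardware holds
 * RAL(0) = 0x12211b00 and the low 16 bits of RAH(0) = 0x5634, so the two
 * loops recover mac_addr[] = {0x00, 0x1b, 0x21, 0x12, 0x34, 0x56}.
 */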
256 * ixgbe_get_bus_info_generic - Generic set PCI bus info
257 * @hw: pointer to hardware structure
259 * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
261 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
263 struct ixgbe_adapter *adapter = hw->back;
264 struct ixgbe_mac_info *mac = &hw->mac;
265 u16 link_status;
267 hw->bus.type = ixgbe_bus_type_pci_express;
269 /* Get the negotiated link width and speed from PCI config space */
270 pci_read_config_word(adapter->pdev, IXGBE_PCI_LINK_STATUS,
271 &link_status);
273 switch (link_status & IXGBE_PCI_LINK_WIDTH) {
274 case IXGBE_PCI_LINK_WIDTH_1:
275 hw->bus.width = ixgbe_bus_width_pcie_x1;
276 break;
277 case IXGBE_PCI_LINK_WIDTH_2:
278 hw->bus.width = ixgbe_bus_width_pcie_x2;
279 break;
280 case IXGBE_PCI_LINK_WIDTH_4:
281 hw->bus.width = ixgbe_bus_width_pcie_x4;
282 break;
283 case IXGBE_PCI_LINK_WIDTH_8:
284 hw->bus.width = ixgbe_bus_width_pcie_x8;
285 break;
286 default:
287 hw->bus.width = ixgbe_bus_width_unknown;
288 break;
291 switch (link_status & IXGBE_PCI_LINK_SPEED) {
292 case IXGBE_PCI_LINK_SPEED_2500:
293 hw->bus.speed = ixgbe_bus_speed_2500;
294 break;
295 case IXGBE_PCI_LINK_SPEED_5000:
296 hw->bus.speed = ixgbe_bus_speed_5000;
297 break;
298 default:
299 hw->bus.speed = ixgbe_bus_speed_unknown;
300 break;
303 mac->ops.set_lan_id(hw);
305 return 0;
309 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
310 * @hw: pointer to the HW structure
312 * Determines the LAN function id by reading memory-mapped registers
313 * and swaps the port value if requested.
315 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
317 struct ixgbe_bus_info *bus = &hw->bus;
318 u32 reg;
320 reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
321 bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
322 bus->lan_id = bus->func;
324 /* check for a port swap */
325 reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
326 if (reg & IXGBE_FACTPS_LFS)
327 bus->func ^= 0x1;
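/*
 * Example: if STATUS.LAN_ID reports function 1 and the FACTPS LFS (port
 * swap) bit is set, bus->lan_id stays 1 while bus->func is flipped to 0
 * by the XOR above.
 */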
331 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
332 * @hw: pointer to hardware structure
334 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
335 * disables transmit and receive units. The adapter_stopped flag is used by
336 * the shared code and drivers to determine if the adapter is in a stopped
337 * state and should not touch the hardware.
339 s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
341 u32 number_of_queues;
342 u32 reg_val;
343 u16 i;
346 * Set the adapter_stopped flag so other driver functions stop touching
347 * the hardware
349 hw->adapter_stopped = true;
351 /* Disable the receive unit */
352 reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
353 reg_val &= ~(IXGBE_RXCTRL_RXEN);
354 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
355 IXGBE_WRITE_FLUSH(hw);
356 msleep(2);
358 /* Clear interrupt mask to stop from interrupts being generated */
359 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
361 /* Clear any pending interrupts */
362 IXGBE_READ_REG(hw, IXGBE_EICR);
364 /* Disable the transmit unit. Each queue must be disabled. */
365 number_of_queues = hw->mac.max_tx_queues;
366 for (i = 0; i < number_of_queues; i++) {
367 reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
368 if (reg_val & IXGBE_TXDCTL_ENABLE) {
369 reg_val &= ~IXGBE_TXDCTL_ENABLE;
370 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), reg_val);
375 * Prevent the PCI-E bus from hanging by disabling PCI-E master
376 * access and verifying that no requests are pending
378 if (ixgbe_disable_pcie_master(hw) != 0)
379 hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
381 return 0;
385 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
386 * @hw: pointer to hardware structure
387 * @index: led number to turn on
389 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
391 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
393 /* To turn on the LED, set mode to ON. */
394 led_reg &= ~IXGBE_LED_MODE_MASK(index);
395 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
396 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
397 IXGBE_WRITE_FLUSH(hw);
399 return 0;
403 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
404 * @hw: pointer to hardware structure
405 * @index: led number to turn off
407 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
409 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
411 /* To turn off the LED, set mode to OFF. */
412 led_reg &= ~IXGBE_LED_MODE_MASK(index);
413 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
414 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
415 IXGBE_WRITE_FLUSH(hw);
417 return 0;
421 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
422 * @hw: pointer to hardware structure
424 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
425 * ixgbe_hw struct in order to set up EEPROM access.
427 s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
429 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
430 u32 eec;
431 u16 eeprom_size;
433 if (eeprom->type == ixgbe_eeprom_uninitialized) {
434 eeprom->type = ixgbe_eeprom_none;
435 /* Set default semaphore delay to 10ms which is a well
436 * tested value */
437 eeprom->semaphore_delay = 10;
440 * Check for EEPROM present first.
441 * If not present leave as none
443 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
444 if (eec & IXGBE_EEC_PRES) {
445 eeprom->type = ixgbe_eeprom_spi;
448 * SPI EEPROM is assumed here. This code would need to
449 * change if a future EEPROM is not SPI.
451 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
452 IXGBE_EEC_SIZE_SHIFT);
453 eeprom->word_size = 1 << (eeprom_size +
454 IXGBE_EEPROM_WORD_SIZE_SHIFT);
457 if (eec & IXGBE_EEC_ADDR_SIZE)
458 eeprom->address_bits = 16;
459 else
460 eeprom->address_bits = 8;
461 hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: "
462 "%d\n", eeprom->type, eeprom->word_size,
463 eeprom->address_bits);
466 return 0;
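/*
 * Sizing sketch (the shift value is an assumption, check ixgbe_type.h):
 * word_size = 1 << (EEC.SIZE + IXGBE_EEPROM_WORD_SIZE_SHIFT), so with a
 * shift of 6 an EEC.SIZE field of 2 would report 1 << 8 = 256 words of
 * 16 bits each.
 */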
470 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
471 * @hw: pointer to hardware structure
472 * @offset: offset within the EEPROM to be written to
473 * @data: 16 bit word to be written to the EEPROM
475 * If ixgbe_eeprom_update_checksum is not called after this function, the
476 * EEPROM will most likely contain an invalid checksum.
478 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
480 s32 status;
481 u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
483 hw->eeprom.ops.init_params(hw);
485 if (offset >= hw->eeprom.word_size) {
486 status = IXGBE_ERR_EEPROM;
487 goto out;
490 /* Prepare the EEPROM for writing */
491 status = ixgbe_acquire_eeprom(hw);
493 if (status == 0) {
494 if (ixgbe_ready_eeprom(hw) != 0) {
495 ixgbe_release_eeprom(hw);
496 status = IXGBE_ERR_EEPROM;
500 if (status == 0) {
501 ixgbe_standby_eeprom(hw);
503 /* Send the WRITE ENABLE command (8 bit opcode ) */
504 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_WREN_OPCODE_SPI,
505 IXGBE_EEPROM_OPCODE_BITS);
507 ixgbe_standby_eeprom(hw);
510 * Some SPI eeproms use the 8th address bit embedded in the
511 * opcode
513 if ((hw->eeprom.address_bits == 8) && (offset >= 128))
514 write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
516 /* Send the Write command (8-bit opcode + addr) */
517 ixgbe_shift_out_eeprom_bits(hw, write_opcode,
518 IXGBE_EEPROM_OPCODE_BITS);
519 ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2),
520 hw->eeprom.address_bits);
522 /* Send the data */
523 data = (data >> 8) | (data << 8);
524 ixgbe_shift_out_eeprom_bits(hw, data, 16);
525 ixgbe_standby_eeprom(hw);
527 msleep(hw->eeprom.semaphore_delay);
528 /* Done with writing - release the EEPROM */
529 ixgbe_release_eeprom(hw);
532 out:
533 return status;
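/*
 * Two details of the write sequence above worth noting: the SPI part is
 * byte addressed, so the word offset is doubled (offset*2) to form the
 * byte address, and the 16-bit word is byte-swapped before being shifted
 * out MSB-first so that the original low byte is transmitted first.
 */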
537 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
538 * @hw: pointer to hardware structure
539 * @offset: offset within the EEPROM to be read
540 * @data: read 16 bit value from EEPROM
542 * Reads 16 bit value from EEPROM through bit-bang method
544 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
545 u16 *data)
547 s32 status;
548 u16 word_in;
549 u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
551 hw->eeprom.ops.init_params(hw);
553 if (offset >= hw->eeprom.word_size) {
554 status = IXGBE_ERR_EEPROM;
555 goto out;
558 /* Prepare the EEPROM for reading */
559 status = ixgbe_acquire_eeprom(hw);
561 if (status == 0) {
562 if (ixgbe_ready_eeprom(hw) != 0) {
563 ixgbe_release_eeprom(hw);
564 status = IXGBE_ERR_EEPROM;
568 if (status == 0) {
569 ixgbe_standby_eeprom(hw);
572 * Some SPI eeproms use the 8th address bit embedded in the
573 * opcode
575 if ((hw->eeprom.address_bits == 8) && (offset >= 128))
576 read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
578 /* Send the READ command (opcode + addr) */
579 ixgbe_shift_out_eeprom_bits(hw, read_opcode,
580 IXGBE_EEPROM_OPCODE_BITS);
581 ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2),
582 hw->eeprom.address_bits);
584 /* Read the data. */
585 word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
586 *data = (word_in >> 8) | (word_in << 8);
588 /* End this read operation */
589 ixgbe_release_eeprom(hw);
592 out:
593 return status;
597 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
598 * @hw: pointer to hardware structure
599 * @offset: offset of word in the EEPROM to read
600 * @data: word read from the EEPROM
602 * Reads a 16 bit word from the EEPROM using the EERD register.
604 s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
606 u32 eerd;
607 s32 status;
609 hw->eeprom.ops.init_params(hw);
611 if (offset >= hw->eeprom.word_size) {
612 status = IXGBE_ERR_EEPROM;
613 goto out;
616 eerd = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) +
617 IXGBE_EEPROM_RW_REG_START;
619 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
620 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
622 if (status == 0)
623 *data = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
624 IXGBE_EEPROM_RW_REG_DATA);
625 else
626 hw_dbg(hw, "Eeprom read timed out\n");
628 out:
629 return status;
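/*
 * EERD usage sketch (see ixgbe_type.h for the authoritative bit layout):
 * the word address is shifted into the address field, the START bit
 * triggers an autonomous read, hardware sets the DONE bit polled by
 * ixgbe_poll_eerd_eewr_done(), and the 16-bit result is extracted with
 * the IXGBE_EEPROM_RW_REG_DATA shift above.
 */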
633 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
634 * @hw: pointer to hardware structure
635 * @ee_reg: EEPROM flag for polling
637 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
638 * read or write is done respectively.
640 s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
642 u32 i;
643 u32 reg;
644 s32 status = IXGBE_ERR_EEPROM;
646 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
647 if (ee_reg == IXGBE_NVM_POLL_READ)
648 reg = IXGBE_READ_REG(hw, IXGBE_EERD);
649 else
650 reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
652 if (reg & IXGBE_EEPROM_RW_REG_DONE) {
653 status = 0;
654 break;
656 udelay(5);
658 return status;
662 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
663 * @hw: pointer to hardware structure
665 * Prepares EEPROM for access using bit-bang method. This function should
666 * be called before issuing a command to the EEPROM.
668 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
670 s32 status = 0;
671 u32 eec = 0;
672 u32 i;
674 if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
675 status = IXGBE_ERR_SWFW_SYNC;
677 if (status == 0) {
678 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
680 /* Request EEPROM Access */
681 eec |= IXGBE_EEC_REQ;
682 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
684 for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
685 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
686 if (eec & IXGBE_EEC_GNT)
687 break;
688 udelay(5);
691 /* Release if grant not acquired */
692 if (!(eec & IXGBE_EEC_GNT)) {
693 eec &= ~IXGBE_EEC_REQ;
694 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
695 hw_dbg(hw, "Could not acquire EEPROM grant\n");
697 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
698 status = IXGBE_ERR_EEPROM;
702 /* Setup EEPROM for Read/Write */
703 if (status == 0) {
704 /* Clear CS and SK */
705 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
706 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
707 IXGBE_WRITE_FLUSH(hw);
708 udelay(1);
710 return status;
714 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
715 * @hw: pointer to hardware structure
717 * Sets the hardware semaphores so EEPROM access can occur for the bit-bang method
719 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
721 s32 status = IXGBE_ERR_EEPROM;
722 u32 timeout;
723 u32 i;
724 u32 swsm;
726 /* Set timeout value based on size of EEPROM */
727 timeout = hw->eeprom.word_size + 1;
729 /* Get SMBI software semaphore between device drivers first */
730 for (i = 0; i < timeout; i++) {
732 * If the SMBI bit is 0 when we read it, then the bit will be
733 * set and we have the semaphore
735 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
736 if (!(swsm & IXGBE_SWSM_SMBI)) {
737 status = 0;
738 break;
740 msleep(1);
743 /* Now get the semaphore between SW/FW through the SWESMBI bit */
744 if (status == 0) {
745 for (i = 0; i < timeout; i++) {
746 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
748 /* Set the SW EEPROM semaphore bit to request access */
749 swsm |= IXGBE_SWSM_SWESMBI;
750 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
753 * If we set the bit successfully then we got the
754 * semaphore.
756 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
757 if (swsm & IXGBE_SWSM_SWESMBI)
758 break;
760 udelay(50);
764 * Release semaphores and return error if SW EEPROM semaphore
765 * was not granted because we don't have access to the EEPROM
767 if (i >= timeout) {
768 hw_dbg(hw, "Driver can't access the Eeprom - Semaphore "
769 "not granted.\n");
770 ixgbe_release_eeprom_semaphore(hw);
771 status = IXGBE_ERR_EEPROM;
775 return status;
779 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
780 * @hw: pointer to hardware structure
782 * This function clears hardware semaphore bits.
784 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
786 u32 swsm;
788 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
790 /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
791 swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
792 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
793 IXGBE_WRITE_FLUSH(hw);
797 * ixgbe_ready_eeprom - Polls for EEPROM ready
798 * @hw: pointer to hardware structure
800 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
802 s32 status = 0;
803 u16 i;
804 u8 spi_stat_reg;
807 * Read "Status Register" repeatedly until the LSB is cleared. The
808 * EEPROM will signal that the command has been completed by clearing
809 * bit 0 of the internal status register. If it's not cleared within
810 * 5 milliseconds, then error out.
812 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
813 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
814 IXGBE_EEPROM_OPCODE_BITS);
815 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
816 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
817 break;
819 udelay(5);
820 ixgbe_standby_eeprom(hw);
824 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
825 * devices (and only 0-5mSec on 5V devices)
827 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
828 hw_dbg(hw, "SPI EEPROM Status error\n");
829 status = IXGBE_ERR_EEPROM;
832 return status;
836 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
837 * @hw: pointer to hardware structure
839 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
841 u32 eec;
843 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
845 /* Toggle CS to flush commands */
846 eec |= IXGBE_EEC_CS;
847 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
848 IXGBE_WRITE_FLUSH(hw);
849 udelay(1);
850 eec &= ~IXGBE_EEC_CS;
851 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
852 IXGBE_WRITE_FLUSH(hw);
853 udelay(1);
857 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
858 * @hw: pointer to hardware structure
859 * @data: data to send to the EEPROM
860 * @count: number of bits to shift out
862 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
863 u16 count)
865 u32 eec;
866 u32 mask;
867 u32 i;
869 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
872 * Mask is used to shift "count" bits of "data" out to the EEPROM
873 * one bit at a time. Determine the starting bit based on count
875 mask = 0x01 << (count - 1);
877 for (i = 0; i < count; i++) {
879 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
880 * "1", and then raising and then lowering the clock (the SK
881 * bit controls the clock input to the EEPROM). A "0" is
882 * shifted out to the EEPROM by setting "DI" to "0" and then
883 * raising and then lowering the clock.
885 if (data & mask)
886 eec |= IXGBE_EEC_DI;
887 else
888 eec &= ~IXGBE_EEC_DI;
890 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
891 IXGBE_WRITE_FLUSH(hw);
893 udelay(1);
895 ixgbe_raise_eeprom_clk(hw, &eec);
896 ixgbe_lower_eeprom_clk(hw, &eec);
899 * Shift mask to signify next bit of data to shift in to the
900 * EEPROM
902 mask = mask >> 1;
905 /* We leave the "DI" bit set to "0" when we leave this routine. */
906 eec &= ~IXGBE_EEC_DI;
907 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
908 IXGBE_WRITE_FLUSH(hw);
912 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
913 * @hw: pointer to hardware structure
915 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
917 u32 eec;
918 u32 i;
919 u16 data = 0;
922 * In order to read a register from the EEPROM, we need to shift
923 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
924 * the clock input to the EEPROM (setting the SK bit), and then reading
925 * the value of the "DO" bit. During this "shifting in" process the
926 * "DI" bit should always be clear.
928 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
930 eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
932 for (i = 0; i < count; i++) {
933 data = data << 1;
934 ixgbe_raise_eeprom_clk(hw, &eec);
936 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
938 eec &= ~(IXGBE_EEC_DI);
939 if (eec & IXGBE_EEC_DO)
940 data |= 1;
942 ixgbe_lower_eeprom_clk(hw, &eec);
945 return data;
949 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
950 * @hw: pointer to hardware structure
951 * @eec: EEC register's current value
953 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
956 * Raise the clock input to the EEPROM
957 * (setting the SK bit), then delay
959 *eec = *eec | IXGBE_EEC_SK;
960 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
961 IXGBE_WRITE_FLUSH(hw);
962 udelay(1);
966 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
967 * @hw: pointer to hardware structure
968 * @eec: EEC register's current value
970 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
973 * Lower the clock input to the EEPROM (clearing the SK bit), then
974 * delay
976 *eec = *eec & ~IXGBE_EEC_SK;
977 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
978 IXGBE_WRITE_FLUSH(hw);
979 udelay(1);
983 * ixgbe_release_eeprom - Release EEPROM, release semaphores
984 * @hw: pointer to hardware structure
986 static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
988 u32 eec;
990 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
992 eec |= IXGBE_EEC_CS; /* Pull CS high */
993 eec &= ~IXGBE_EEC_SK; /* Lower SCK */
995 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
996 IXGBE_WRITE_FLUSH(hw);
998 udelay(1);
1000 /* Stop requesting EEPROM access */
1001 eec &= ~IXGBE_EEC_REQ;
1002 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1004 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1008 * ixgbe_calc_eeprom_checksum - Calculates and returns the checksum
1009 * @hw: pointer to hardware structure
1011 static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw)
1013 u16 i;
1014 u16 j;
1015 u16 checksum = 0;
1016 u16 length = 0;
1017 u16 pointer = 0;
1018 u16 word = 0;
1020 /* Include words 0x0 - 0x3E in the checksum; word 0x3F holds the checksum itself */
1021 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
1022 if (hw->eeprom.ops.read(hw, i, &word) != 0) {
1023 hw_dbg(hw, "EEPROM read failed\n");
1024 break;
1026 checksum += word;
1029 /* Include all data from pointers except for the fw pointer */
1030 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
1031 hw->eeprom.ops.read(hw, i, &pointer);
1033 /* Make sure the pointer seems valid */
1034 if (pointer != 0xFFFF && pointer != 0) {
1035 hw->eeprom.ops.read(hw, pointer, &length);
1037 if (length != 0xFFFF && length != 0) {
1038 for (j = pointer+1; j <= pointer+length; j++) {
1039 hw->eeprom.ops.read(hw, j, &word);
1040 checksum += word;
1046 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
1048 return checksum;
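/*
 * Checksum arithmetic, for reference: the 16-bit sum of every word covered
 * above plus the stored checksum word should equal IXGBE_EEPROM_SUM (0xBABA
 * in the ixgbe headers). E.g. if the covered words sum to 0x1234, the value
 * returned here is 0xBABA - 0x1234 = 0xA886.
 */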
1052 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
1053 * @hw: pointer to hardware structure
1054 * @checksum_val: calculated checksum
1056 * Performs checksum calculation and validates the EEPROM checksum. If the
1057 * caller does not need checksum_val, the value can be NULL.
1059 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
1060 u16 *checksum_val)
1062 s32 status;
1063 u16 checksum;
1064 u16 read_checksum = 0;
1067 * Read the first word from the EEPROM. If this times out or fails, do
1068 * not continue or we could be in for a very long wait while every
1069 * EEPROM read fails
1071 status = hw->eeprom.ops.read(hw, 0, &checksum);
1073 if (status == 0) {
1074 checksum = ixgbe_calc_eeprom_checksum(hw);
1076 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
1079 * Verify read checksum from EEPROM is the same as
1080 * calculated checksum
1082 if (read_checksum != checksum)
1083 status = IXGBE_ERR_EEPROM_CHECKSUM;
1085 /* If the user cares, return the calculated checksum */
1086 if (checksum_val)
1087 *checksum_val = checksum;
1088 } else {
1089 hw_dbg(hw, "EEPROM read failed\n");
1092 return status;
1096 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
1097 * @hw: pointer to hardware structure
1099 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
1101 s32 status;
1102 u16 checksum;
1105 * Read the first word from the EEPROM. If this times out or fails, do
1106 * not continue or we could be in for a very long wait while every
1107 * EEPROM read fails
1109 status = hw->eeprom.ops.read(hw, 0, &checksum);
1111 if (status == 0) {
1112 checksum = ixgbe_calc_eeprom_checksum(hw);
1113 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
1114 checksum);
1115 } else {
1116 hw_dbg(hw, "EEPROM read failed\n");
1119 return status;
1123 * ixgbe_validate_mac_addr - Validate MAC address
1124 * @mac_addr: pointer to MAC address.
1126 * Tests a MAC address to ensure it is a valid Individual Address
1128 s32 ixgbe_validate_mac_addr(u8 *mac_addr)
1130 s32 status = 0;
1132 /* Make sure it is not a multicast address */
1133 if (IXGBE_IS_MULTICAST(mac_addr))
1134 status = IXGBE_ERR_INVALID_MAC_ADDR;
1135 /* Not a broadcast address */
1136 else if (IXGBE_IS_BROADCAST(mac_addr))
1137 status = IXGBE_ERR_INVALID_MAC_ADDR;
1138 /* Reject the zero address */
1139 else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
1140 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0)
1141 status = IXGBE_ERR_INVALID_MAC_ADDR;
1143 return status;
1147 * ixgbe_set_rar_generic - Set Rx address register
1148 * @hw: pointer to hardware structure
1149 * @index: Receive address register to write
1150 * @addr: Address to put into receive address register
1151 * @vmdq: VMDq "set" or "pool" index
1152 * @enable_addr: set flag that address is active
1154 * Puts an ethernet address into a receive address register.
1156 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
1157 u32 enable_addr)
1159 u32 rar_low, rar_high;
1160 u32 rar_entries = hw->mac.num_rar_entries;
1162 /* setup VMDq pool selection before this RAR gets enabled */
1163 hw->mac.ops.set_vmdq(hw, index, vmdq);
1165 /* Make sure we are using a valid rar index range */
1166 if (index < rar_entries) {
1168 * HW expects these in little endian so we reverse the byte
1169 * order from network order (big endian) to little endian
1171 rar_low = ((u32)addr[0] |
1172 ((u32)addr[1] << 8) |
1173 ((u32)addr[2] << 16) |
1174 ((u32)addr[3] << 24));
1176 * Some parts put the VMDq setting in the extra RAH bits,
1177 * so save everything except the lower 16 bits that hold part
1178 * of the address and the address valid bit.
1180 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1181 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1182 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
1184 if (enable_addr != 0)
1185 rar_high |= IXGBE_RAH_AV;
1187 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
1188 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1189 } else {
1190 hw_dbg(hw, "RAR index %d is out of range.\n", index);
1191 return IXGBE_ERR_RAR_INDEX;
1194 return 0;
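/*
 * Packing example (hypothetical address): programming 00:1b:21:12:34:56
 * into RAR[n] writes RAL(n) = 0x12211b00 and ORs 0x5634 (plus IXGBE_RAH_AV
 * when enable_addr is set) into the preserved upper bits of RAH(n).
 */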
1198 * ixgbe_clear_rar_generic - Remove Rx address register
1199 * @hw: pointer to hardware structure
1200 * @index: Receive address register to write
1202 * Clears an ethernet address from a receive address register.
1204 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
1206 u32 rar_high;
1207 u32 rar_entries = hw->mac.num_rar_entries;
1209 /* Make sure we are using a valid rar index range */
1210 if (index < rar_entries) {
1212 * Some parts put the VMDq setting in the extra RAH bits,
1213 * so save everything except the lower 16 bits that hold part
1214 * of the address and the address valid bit.
1216 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1217 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1219 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
1220 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1221 } else {
1222 hw_dbg(hw, "RAR index %d is out of range.\n", index);
1223 return IXGBE_ERR_RAR_INDEX;
1226 /* clear VMDq pool/queue selection for this RAR */
1227 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
1229 return 0;
1233 * ixgbe_enable_rar - Enable Rx address register
1234 * @hw: pointer to hardware structure
1235 * @index: index into the RAR table
1237 * Enables the select receive address register.
1239 static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index)
1241 u32 rar_high;
1243 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1244 rar_high |= IXGBE_RAH_AV;
1245 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1249 * ixgbe_disable_rar - Disable Rx address register
1250 * @hw: pointer to hardware structure
1251 * @index: index into the RAR table
1253 * Disables the select receive address register.
1255 static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index)
1257 u32 rar_high;
1259 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1260 rar_high &= (~IXGBE_RAH_AV);
1261 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1265 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
1266 * @hw: pointer to hardware structure
1268 * Places the MAC address in receive address register 0 and clears the rest
1269 * of the receive address registers. Clears the multicast table. Assumes
1270 * the receiver is in reset when the routine is called.
1272 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
1274 u32 i;
1275 u32 rar_entries = hw->mac.num_rar_entries;
1278 * If the current mac address is valid, assume it is a software override
1279 * to the permanent address.
1280 * Otherwise, use the permanent address from the eeprom.
1282 if (ixgbe_validate_mac_addr(hw->mac.addr) ==
1283 IXGBE_ERR_INVALID_MAC_ADDR) {
1284 /* Get the MAC address from the RAR0 for later reference */
1285 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
1287 hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr);
1288 } else {
1289 /* Setup the receive address. */
1290 hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
1291 hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);
1293 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1295 hw->addr_ctrl.overflow_promisc = 0;
1297 hw->addr_ctrl.rar_used_count = 1;
1299 /* Zero out the other receive addresses. */
1300 hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1);
1301 for (i = 1; i < rar_entries; i++) {
1302 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
1303 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
1306 /* Clear the MTA */
1307 hw->addr_ctrl.mc_addr_in_rar_count = 0;
1308 hw->addr_ctrl.mta_in_use = 0;
1309 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
1311 hw_dbg(hw, " Clearing MTA\n");
1312 for (i = 0; i < hw->mac.mcft_size; i++)
1313 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
1315 if (hw->mac.ops.init_uta_tables)
1316 hw->mac.ops.init_uta_tables(hw);
1318 return 0;
1322 * ixgbe_add_uc_addr - Adds a secondary unicast address.
1323 * @hw: pointer to hardware structure
1324 * @addr: new address
 * @vmdq: VMDq pool index to associate with the address
1326 * Adds the address to an unused receive address register or, if none is
 * free, puts the controller into promiscuous mode.
1328 static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
1330 u32 rar_entries = hw->mac.num_rar_entries;
1331 u32 rar;
1333 hw_dbg(hw, " UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
1334 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
1337 * Place this address in the RAR if there is room,
1338 * else put the controller into promiscuous mode
1340 if (hw->addr_ctrl.rar_used_count < rar_entries) {
1341 rar = hw->addr_ctrl.rar_used_count -
1342 hw->addr_ctrl.mc_addr_in_rar_count;
1343 hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
1344 hw_dbg(hw, "Added a secondary address to RAR[%d]\n", rar);
1345 hw->addr_ctrl.rar_used_count++;
1346 } else {
1347 hw->addr_ctrl.overflow_promisc++;
1350 hw_dbg(hw, "ixgbe_add_uc_addr Complete\n");
1354 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
1355 * @hw: pointer to hardware structure
1356 * @netdev: pointer to net device structure
1358 * The given list replaces any existing list. Clears the secondary addrs from
1359 * receive address registers. Uses unused receive address registers for the
1360 * first secondary addresses, and falls back to promiscuous mode as needed.
1362 * Drivers using secondary unicast addresses must set user_set_promisc when
1363 * manually putting the device into promiscuous mode.
1365 s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
1366 struct net_device *netdev)
1368 u32 i;
1369 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
1370 u32 uc_addr_in_use;
1371 u32 fctrl;
1372 struct netdev_hw_addr *ha;
1375 * Clear accounting of old secondary address list,
1376 * don't count RAR[0]
1378 uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
1379 hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
1380 hw->addr_ctrl.overflow_promisc = 0;
1382 /* Zero out the other receive addresses */
1383 hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use + 1);
1384 for (i = 0; i < uc_addr_in_use; i++) {
1385 IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
1386 IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
1389 /* Add the new addresses */
1390 netdev_for_each_uc_addr(ha, netdev) {
1391 hw_dbg(hw, " Adding the secondary addresses:\n");
1392 ixgbe_add_uc_addr(hw, ha->addr, 0);
1395 if (hw->addr_ctrl.overflow_promisc) {
1396 /* enable promisc if not already in overflow or set by user */
1397 if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
1398 hw_dbg(hw, " Entering address overflow promisc mode\n");
1399 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1400 fctrl |= IXGBE_FCTRL_UPE;
1401 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1402 hw->addr_ctrl.uc_set_promisc = true;
1404 } else {
1405 /* only disable if set by overflow, not by user */
1406 if ((old_promisc_setting && hw->addr_ctrl.uc_set_promisc) &&
1407 !(hw->addr_ctrl.user_set_promisc)) {
1408 hw_dbg(hw, " Leaving address overflow promisc mode\n");
1409 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1410 fctrl &= ~IXGBE_FCTRL_UPE;
1411 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1412 hw->addr_ctrl.uc_set_promisc = false;
1416 hw_dbg(hw, "ixgbe_update_uc_addr_list_generic Complete\n");
1417 return 0;
1421 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
1422 * @hw: pointer to hardware structure
1423 * @mc_addr: the multicast address
1425 * Extracts the 12 bits from a multicast address that determine which
1426 * bit-vector to set in the multicast table. The hardware uses the same 12
1427 * bits of incoming rx multicast addresses to determine the bit-vector to
1428 * check in the MTA. Which of the 4 combinations of 12 bits the hardware uses
1429 * is selected by the MO field of MCSTCTRL. The MO field is set during
1430 * initialization to mc_filter_type.
1432 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
1434 u32 vector = 0;
1436 switch (hw->mac.mc_filter_type) {
1437 case 0: /* use bits [47:36] of the address */
1438 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
1439 break;
1440 case 1: /* use bits [46:35] of the address */
1441 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
1442 break;
1443 case 2: /* use bits [45:34] of the address */
1444 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
1445 break;
1446 case 3: /* use bits [43:32] of the address */
1447 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
1448 break;
1449 default: /* Invalid mc_filter_type */
1450 hw_dbg(hw, "MC filter type param set incorrectly\n");
1451 break;
1454 /* vector can only be 12-bits or boundary will be exceeded */
1455 vector &= 0xFFF;
1456 return vector;
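/*
 * Worked example for mc_filter_type 0 (hypothetical address): for the
 * multicast address 01:00:5e:00:00:fb, mc_addr[4] = 0x00 and
 * mc_addr[5] = 0xfb, so vector = (0x00 >> 4) | (0xfb << 4) = 0xfb0.
 */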
1460 * ixgbe_set_mta - Set bit-vector in multicast table
1461 * @hw: pointer to hardware structure
1462 * @mc_addr: multicast address to set in the MTA
1464 * Sets the bit-vector in the multicast table.
1466 static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
1468 u32 vector;
1469 u32 vector_bit;
1470 u32 vector_reg;
1471 u32 mta_reg;
1473 hw->addr_ctrl.mta_in_use++;
1475 vector = ixgbe_mta_vector(hw, mc_addr);
1476 hw_dbg(hw, " bit-vector = 0x%03X\n", vector);
1479 * The MTA is a register array of 128 32-bit registers. It is treated
1480 * like an array of 4096 bits. We want to set bit
1481 * BitArray[vector_value]. So we figure out what register the bit is
1482 * in, read it, OR in the new bit, then write back the new value. The
1483 * register is determined by the upper 7 bits of the vector value and
1484 * the bit within that register are determined by the lower 5 bits of
1485 * the value.
1487 vector_reg = (vector >> 5) & 0x7F;
1488 vector_bit = vector & 0x1F;
1489 mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
1490 mta_reg |= (1 << vector_bit);
1491 IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
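/*
 * Continuing the example from ixgbe_mta_vector(): a vector of 0xfb0 selects
 * register 0xfb0 >> 5 = 125 and bit 0xfb0 & 0x1f = 16, so bit 16 of MTA[125]
 * is set above.
 */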
1495 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
1496 * @hw: pointer to hardware structure
1497 * @netdev: pointer to net device structure
1499 * The given list replaces any existing list. Clears the MC addrs from receive
1500 * address registers and the multicast table. Uses unused receive address
1501 * registers for the first multicast addresses, and hashes the rest into the
1502 * multicast table.
1504 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
1505 struct net_device *netdev)
1507 struct netdev_hw_addr *ha;
1508 u32 i;
1511 * Set the new number of MC addresses that we are being requested to
1512 * use.
1514 hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
1515 hw->addr_ctrl.mta_in_use = 0;
1517 /* Clear the MTA */
1518 hw_dbg(hw, " Clearing MTA\n");
1519 for (i = 0; i < hw->mac.mcft_size; i++)
1520 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
1522 /* Add the new addresses */
1523 netdev_for_each_mc_addr(ha, netdev) {
1524 hw_dbg(hw, " Adding the multicast addresses:\n");
1525 ixgbe_set_mta(hw, ha->addr);
1528 /* Enable mta */
1529 if (hw->addr_ctrl.mta_in_use > 0)
1530 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
1531 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
1533 hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n");
1534 return 0;
1538 * ixgbe_enable_mc_generic - Enable multicast address in RAR
1539 * @hw: pointer to hardware structure
1541 * Enables multicast address in RAR and the use of the multicast hash table.
1543 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
1545 u32 i;
1546 u32 rar_entries = hw->mac.num_rar_entries;
1547 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
1549 if (a->mc_addr_in_rar_count > 0)
1550 for (i = (rar_entries - a->mc_addr_in_rar_count);
1551 i < rar_entries; i++)
1552 ixgbe_enable_rar(hw, i);
1554 if (a->mta_in_use > 0)
1555 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
1556 hw->mac.mc_filter_type);
1558 return 0;
1562 * ixgbe_disable_mc_generic - Disable multicast address in RAR
1563 * @hw: pointer to hardware structure
1565 * Disables multicast address in RAR and the use of the multicast hash table.
1567 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
1569 u32 i;
1570 u32 rar_entries = hw->mac.num_rar_entries;
1571 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
1573 if (a->mc_addr_in_rar_count > 0)
1574 for (i = (rar_entries - a->mc_addr_in_rar_count);
1575 i < rar_entries; i++)
1576 ixgbe_disable_rar(hw, i);
1578 if (a->mta_in_use > 0)
1579 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
1581 return 0;
1585 * ixgbe_fc_enable_generic - Enable flow control
1586 * @hw: pointer to hardware structure
1587 * @packetbuf_num: packet buffer number (0-7)
1589 * Enable flow control according to the current settings.
1591 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
1593 s32 ret_val = 0;
1594 u32 mflcn_reg, fccfg_reg;
1595 u32 reg;
1596 u32 rx_pba_size;
1598 #ifdef CONFIG_DCB
1599 if (hw->fc.requested_mode == ixgbe_fc_pfc)
1600 goto out;
1602 #endif /* CONFIG_DCB */
1603 /* Negotiate the fc mode to use */
1604 ret_val = ixgbe_fc_autoneg(hw);
1605 if (ret_val)
1606 goto out;
1608 /* Disable any previous flow control settings */
1609 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
1610 mflcn_reg &= ~(IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE);
1612 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
1613 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
1616 * The possible values of fc.current_mode are:
1617 * 0: Flow control is completely disabled
1618 * 1: Rx flow control is enabled (we can receive pause frames,
1619 * but not send pause frames).
1620 * 2: Tx flow control is enabled (we can send pause frames but
1621 * we do not support receiving pause frames).
1622 * 3: Both Rx and Tx flow control (symmetric) are enabled.
1623 * 4: Priority Flow Control is enabled.
1624 * other: Invalid.
1626 switch (hw->fc.current_mode) {
1627 case ixgbe_fc_none:
1629 * Flow control is disabled by software override or autoneg.
1630 * The code below will actually disable it in the HW.
1632 break;
1633 case ixgbe_fc_rx_pause:
1635 * Rx Flow control is enabled and Tx Flow control is
1636 * disabled by software override. Since there really
1637 * isn't a way to advertise that we are capable of RX
1638 * Pause ONLY, we will advertise that we support both
1639 * symmetric and asymmetric Rx PAUSE. Later, we will
1640 * disable the adapter's ability to send PAUSE frames.
1642 mflcn_reg |= IXGBE_MFLCN_RFCE;
1643 break;
1644 case ixgbe_fc_tx_pause:
1646 * Tx Flow control is enabled, and Rx Flow control is
1647 * disabled by software override.
1649 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
1650 break;
1651 case ixgbe_fc_full:
1652 /* Flow control (both Rx and Tx) is enabled by SW override. */
1653 mflcn_reg |= IXGBE_MFLCN_RFCE;
1654 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
1655 break;
1656 #ifdef CONFIG_DCB
1657 case ixgbe_fc_pfc:
1658 goto out;
1659 break;
1660 #endif /* CONFIG_DCB */
1661 default:
1662 hw_dbg(hw, "Flow control param set incorrectly\n");
1663 ret_val = IXGBE_ERR_CONFIG;
1664 goto out;
1665 break;
1668 /* Set 802.3x based flow control settings. */
1669 mflcn_reg |= IXGBE_MFLCN_DPF;
1670 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
1671 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
1673 reg = IXGBE_READ_REG(hw, IXGBE_MTQC);
1674 /* Thresholds are different for link flow control when in DCB mode */
1675 if (reg & IXGBE_MTQC_RT_ENA) {
1676 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
1678 /* Always disable XON for LFC when in DCB mode */
1679 reg = (rx_pba_size >> 5) & 0xFFE0;
1680 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), reg);
1682 reg = (rx_pba_size >> 2) & 0xFFE0;
1683 if (hw->fc.current_mode & ixgbe_fc_tx_pause)
1684 reg |= IXGBE_FCRTH_FCEN;
1685 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), reg);
1686 } else {
1688 * Set up and enable Rx high/low water mark thresholds,
1689 * enable XON.
1691 if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
1692 if (hw->fc.send_xon) {
1693 IXGBE_WRITE_REG(hw,
1694 IXGBE_FCRTL_82599(packetbuf_num),
1695 (hw->fc.low_water |
1696 IXGBE_FCRTL_XONE));
1697 } else {
1698 IXGBE_WRITE_REG(hw,
1699 IXGBE_FCRTL_82599(packetbuf_num),
1700 hw->fc.low_water);
1703 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num),
1704 (hw->fc.high_water | IXGBE_FCRTH_FCEN));
1708 /* Configure pause time (2 TCs per register) */
1709 reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
1710 if ((packetbuf_num & 1) == 0)
1711 reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
1712 else
1713 reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
1714 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
1716 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
1718 out:
1719 return ret_val;
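/*
 * Pause-time packing note: FCTTV holds two 16-bit pause times per register,
 * so packet buffer 3 (an arbitrary example) lands in bits 31:16 of FCTTV(1),
 * and the refresh threshold FCRTV is programmed to half the pause time.
 */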
1723 * ixgbe_fc_autoneg - Configure flow control
1724 * @hw: pointer to hardware structure
1726 * Compares our advertised flow control capabilities to those advertised by
1727 * our link partner, and determines the proper flow control mode to use.
1729 s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
1731 s32 ret_val = 0;
1732 ixgbe_link_speed speed;
1733 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
1734 u32 links2, anlp1_reg, autoc_reg, links;
1735 bool link_up;
1738 * AN should have completed when the cable was plugged in.
1739 * Look for reasons to bail out. Bail out if:
1740 * - FC autoneg is disabled, or if
1741 * - link is not up.
1743 * Since we're being called from an LSC, link is already known to be up.
1744 * So use link_up_wait_to_complete=false.
1746 hw->mac.ops.check_link(hw, &speed, &link_up, false);
1748 if (hw->fc.disable_fc_autoneg || (!link_up)) {
1749 hw->fc.fc_was_autonegged = false;
1750 hw->fc.current_mode = hw->fc.requested_mode;
1751 goto out;
1755 * On backplane, bail out if
1756 * - backplane autoneg was not completed, or if
1757 * - we are 82599 and link partner is not AN enabled
1759 if (hw->phy.media_type == ixgbe_media_type_backplane) {
1760 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
1761 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
1762 hw->fc.fc_was_autonegged = false;
1763 hw->fc.current_mode = hw->fc.requested_mode;
1764 goto out;
1767 if (hw->mac.type == ixgbe_mac_82599EB) {
1768 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
1769 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
1770 hw->fc.fc_was_autonegged = false;
1771 hw->fc.current_mode = hw->fc.requested_mode;
1772 goto out;
1778 * On multispeed fiber at 1g, bail out if
1779 * - link is up but AN did not complete, or if
1780 * - link is up and AN completed but timed out
1782 if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL)) {
1783 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
1784 if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
1785 ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
1786 hw->fc.fc_was_autonegged = false;
1787 hw->fc.current_mode = hw->fc.requested_mode;
1788 goto out;
1793 * Bail out on
1794 * - copper or CX4 adapters
1795 * - fiber adapters running at 10gig
1797 if ((hw->phy.media_type == ixgbe_media_type_copper) ||
1798 (hw->phy.media_type == ixgbe_media_type_cx4) ||
1799 ((hw->phy.media_type == ixgbe_media_type_fiber) &&
1800 (speed == IXGBE_LINK_SPEED_10GB_FULL))) {
1801 hw->fc.fc_was_autonegged = false;
1802 hw->fc.current_mode = hw->fc.requested_mode;
1803 goto out;
1807 * Read the AN advertisement and LP ability registers and resolve
1808 * local flow control settings accordingly
1810 if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
1811 (hw->phy.media_type != ixgbe_media_type_backplane)) {
1812 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
1813 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
1814 if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1815 (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) {
1817 * Now we need to check if the user selected Rx-only
1818 * pause frames. In this case, we had to advertise
1819 * FULL flow control because we could not advertise RX
1820 * ONLY. Hence, we must now check to see if we need to
1821 * turn OFF the TRANSMISSION of PAUSE frames.
1823 if (hw->fc.requested_mode == ixgbe_fc_full) {
1824 hw->fc.current_mode = ixgbe_fc_full;
1825 hw_dbg(hw, "Flow Control = FULL.\n");
1826 } else {
1827 hw->fc.current_mode = ixgbe_fc_rx_pause;
1828 hw_dbg(hw, "Flow Control=RX PAUSE only\n");
1830 } else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1831 (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
1832 (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1833 (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
1834 hw->fc.current_mode = ixgbe_fc_tx_pause;
1835 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
1836 } else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1837 (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
1838 !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1839 (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
1840 hw->fc.current_mode = ixgbe_fc_rx_pause;
1841 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
1842 } else {
1843 hw->fc.current_mode = ixgbe_fc_none;
1844 hw_dbg(hw, "Flow Control = NONE.\n");
1848 if (hw->phy.media_type == ixgbe_media_type_backplane) {
1850 * Read the 10g AN autoc and LP ability registers and resolve
1851 * local flow control settings accordingly
1853 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1854 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
1856 if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
1857 (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE)) {
1859 * Now we need to check if the user selected Rx-only
1860 * pause frames. In this case, we had to advertise
1861 * FULL flow control because we could not advertise RX
1862 * ONLY. Hence, we must now check to see if we need to
1863 * turn OFF the TRANSMISSION of PAUSE frames.
1865 if (hw->fc.requested_mode == ixgbe_fc_full) {
1866 hw->fc.current_mode = ixgbe_fc_full;
1867 hw_dbg(hw, "Flow Control = FULL.\n");
1868 } else {
1869 hw->fc.current_mode = ixgbe_fc_rx_pause;
1870 hw_dbg(hw, "Flow Control=RX PAUSE only\n");
1872 } else if (!(autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
1873 (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
1874 (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
1875 (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
1876 hw->fc.current_mode = ixgbe_fc_tx_pause;
1877 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
1878 } else if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
1879 (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
1880 !(anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
1881 (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
1882 hw->fc.current_mode = ixgbe_fc_rx_pause;
1883 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
1884 } else {
1885 hw->fc.current_mode = ixgbe_fc_none;
1886 hw_dbg(hw, "Flow Control = NONE.\n");
1889 /* Record that current_mode is the result of a successful autoneg */
1890 hw->fc.fc_was_autonegged = true;
1892 out:
1893 return ret_val;
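/*
 * Resolution summary of the logic above (IEEE 802.3 Annex 28B style):
 *   local SYM and partner SYM               -> full (or rx_pause if only
 *                                              Rx pause was requested)
 *   local ASM only and partner SYM+ASM      -> tx_pause
 *   local SYM+ASM and partner ASM only      -> rx_pause
 *   anything else                           -> none
 */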
1897 * ixgbe_setup_fc - Set up flow control
1898 * @hw: pointer to hardware structure
1900 * Called at init time to set up flow control.
1902 static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
1904 s32 ret_val = 0;
1905 u32 reg;
1907 #ifdef CONFIG_DCB
1908 if (hw->fc.requested_mode == ixgbe_fc_pfc) {
1909 hw->fc.current_mode = hw->fc.requested_mode;
1910 goto out;
1913 #endif
1914 /* Validate the packetbuf configuration */
1915 if (packetbuf_num < 0 || packetbuf_num > 7) {
1916 hw_dbg(hw, "Invalid packet buffer number [%d], expected range "
1917 "is 0-7\n", packetbuf_num);
1918 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
1919 goto out;
1923 * Validate the water mark configuration. Zero water marks are invalid
1924 * because they cause the controller to just blast out fc packets.
1926 if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
1927 hw_dbg(hw, "Invalid water mark configuration\n");
1928 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
1929 goto out;
1933 * Validate the requested mode. Strict IEEE mode does not allow
1934 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
1936 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
1937 hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict "
1938 "IEEE mode\n");
1939 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
1940 goto out;
1944 * 10gig parts do not have a word in the EEPROM to determine the
1945 * default flow control setting, so we explicitly set it to full.
1947 if (hw->fc.requested_mode == ixgbe_fc_default)
1948 hw->fc.requested_mode = ixgbe_fc_full;
1951 * Set up the 1G flow control advertisement registers so the HW will be
1952 * able to do fc autoneg once the cable is plugged in. If we end up
1953 * using 10g instead, this is harmless.
1955 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
1957 /*
1958 * The possible values of fc.requested_mode are:
1959 * 0: Flow control is completely disabled
1960 * 1: Rx flow control is enabled (we can receive pause frames,
1961 * but not send pause frames).
1962 * 2: Tx flow control is enabled (we can send pause frames but
1963 * we do not support receiving pause frames).
1964 * 3: Both Rx and Tx flow control (symmetric) are enabled.
1965 #ifdef CONFIG_DCB
1966 * 4: Priority Flow Control is enabled.
1967 #endif
1968 * other: Invalid.
1969 */
1970 switch (hw->fc.requested_mode) {
1971 case ixgbe_fc_none:
1972 /* Flow control completely disabled by software override. */
1973 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
1974 break;
1975 case ixgbe_fc_rx_pause:
1976 /*
1977 * Rx Flow control is enabled and Tx Flow control is
1978 * disabled by software override. Since there really
1979 * isn't a way to advertise that we are capable of RX
1980 * Pause ONLY, we will advertise that we support both
1981 * symmetric and asymmetric Rx PAUSE. Later, we will
1982 * disable the adapter's ability to send PAUSE frames.
1983 */
1984 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
1985 break;
1986 case ixgbe_fc_tx_pause:
1987 /*
1988 * Tx Flow control is enabled, and Rx Flow control is
1989 * disabled by software override.
1990 */
1991 reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
1992 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
1993 break;
1994 case ixgbe_fc_full:
1995 /* Flow control (both Rx and Tx) is enabled by SW override. */
1996 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
1997 break;
1998 #ifdef CONFIG_DCB
1999 case ixgbe_fc_pfc:
2000 goto out;
2001 break;
2002 #endif /* CONFIG_DCB */
2003 default:
2004 hw_dbg(hw, "Flow control param set incorrectly\n");
2005 ret_val = IXGBE_ERR_CONFIG;
2006 goto out;
2007 break;
2008 }
2010 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
2011 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
2013 /* Disable AN timeout */
2014 if (hw->fc.strict_ieee)
2015 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
2017 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
2018 hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
2020 /*
2021 * Set up the 10G flow control advertisement registers so the HW
2022 * can do fc autoneg once the cable is plugged in. If we end up
2023 * using 1g instead, this is harmless.
2024 */
2025 reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2027 /*
2028 * The possible values of fc.requested_mode are:
2029 * 0: Flow control is completely disabled
2030 * 1: Rx flow control is enabled (we can receive pause frames,
2031 * but not send pause frames).
2032 * 2: Tx flow control is enabled (we can send pause frames but
2033 * we do not support receiving pause frames).
2034 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2035 * other: Invalid.
2036 */
2037 switch (hw->fc.requested_mode) {
2038 case ixgbe_fc_none:
2039 /* Flow control completely disabled by software override. */
2040 reg &= ~(IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
2041 break;
2042 case ixgbe_fc_rx_pause:
2043 /*
2044 * Rx Flow control is enabled and Tx Flow control is
2045 * disabled by software override. Since there really
2046 * isn't a way to advertise that we are capable of RX
2047 * Pause ONLY, we will advertise that we support both
2048 * symmetric and asymmetric Rx PAUSE. Later, we will
2049 * disable the adapter's ability to send PAUSE frames.
2050 */
2051 reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
2052 break;
2053 case ixgbe_fc_tx_pause:
2054 /*
2055 * Tx Flow control is enabled, and Rx Flow control is
2056 * disabled by software override.
2057 */
2058 reg |= (IXGBE_AUTOC_ASM_PAUSE);
2059 reg &= ~(IXGBE_AUTOC_SYM_PAUSE);
2060 break;
2061 case ixgbe_fc_full:
2062 /* Flow control (both Rx and Tx) is enabled by SW override. */
2063 reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
2064 break;
2065 #ifdef CONFIG_DCB
2066 case ixgbe_fc_pfc:
2067 goto out;
2068 break;
2069 #endif /* CONFIG_DCB */
2070 default:
2071 hw_dbg(hw, "Flow control param set incorrectly\n");
2072 ret_val = IXGBE_ERR_CONFIG;
2073 goto out;
2074 break;
2075 }
2076 /*
2077 * AUTOC restart handles negotiation of 1G and 10G. There is
2078 * no need to set the PCS1GCTL register.
2079 */
2080 reg |= IXGBE_AUTOC_AN_RESTART;
2081 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg);
2082 hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
2084 out:
2085 return ret_val;
2086 }
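/*
 * Example (illustrative sketch, not part of the driver): before calling
 * ixgbe_setup_fc(hw, 0), a caller is expected to have filled in the
 * hw->fc fields that the routine validates above.  The numbers here are
 * placeholders chosen only to satisfy the non-zero water mark check, not
 * tuned settings:
 *
 *	hw->fc.requested_mode = ixgbe_fc_full;
 *	hw->fc.pause_time     = 0xFFFF;
 *	hw->fc.high_water     = 0x20000;
 *	hw->fc.low_water      = 0x10000;
 *	ret_val = ixgbe_setup_fc(hw, 0);
 *
 * A zero water mark or pause time makes the routine return
 * IXGBE_ERR_INVALID_LINK_SETTINGS, as checked above.
 */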
2088 /**
2089 * ixgbe_disable_pcie_master - Disable PCI-express master access
2090 * @hw: pointer to hardware structure
2092 * Disables PCI-Express master access and verifies there are no pending
2093 * requests. Returns IXGBE_ERR_MASTER_REQUESTS_PENDING if the master
2094 * disable bit has not caused the master requests to be disabled, or 0
2095 * once master requests are disabled.
2096 **/
2097 s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2098 {
2099 u32 i;
2100 u32 reg_val;
2101 u32 number_of_queues;
2102 s32 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
2104 /* Disable the receive unit by stopping each queue */
2105 number_of_queues = hw->mac.max_rx_queues;
2106 for (i = 0; i < number_of_queues; i++) {
2107 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
2108 if (reg_val & IXGBE_RXDCTL_ENABLE) {
2109 reg_val &= ~IXGBE_RXDCTL_ENABLE;
2110 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
2111 }
2112 }
2114 reg_val = IXGBE_READ_REG(hw, IXGBE_CTRL);
2115 reg_val |= IXGBE_CTRL_GIO_DIS;
2116 IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val);
2118 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2119 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) {
2120 status = 0;
2121 break;
2122 }
2123 udelay(100);
2124 }
2126 return status;
2127 }
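/*
 * Example (illustrative, assuming an initialized struct ixgbe_hw): the
 * return value distinguishes a clean master disable from a timeout, so a
 * reset path might check it before continuing:
 *
 *	s32 status = ixgbe_disable_pcie_master(hw);
 *	if (status == IXGBE_ERR_MASTER_REQUESTS_PENDING)
 *		hw_dbg(hw, "PCIe master requests are still pending\n");
 *
 * The wait above is bounded by IXGBE_PCI_MASTER_DISABLE_TIMEOUT polls of
 * the GIO status bit, spaced roughly 100 usec apart.
 */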
2130 /**
2131 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
2132 * @hw: pointer to hardware structure
2133 * @mask: Mask to specify which semaphore to acquire
2135 * Acquires the SWFW semaphore through the GSSR register for the specified
2136 * function (CSR, PHY0, PHY1, EEPROM, Flash)
2137 **/
2138 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2139 {
2140 u32 gssr;
2141 u32 swmask = mask;
2142 u32 fwmask = mask << 5;
2143 s32 timeout = 200;
2145 while (timeout) {
2146 if (ixgbe_get_eeprom_semaphore(hw))
2147 return IXGBE_ERR_SWFW_SYNC;
2149 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2150 if (!(gssr & (fwmask | swmask)))
2151 break;
2153 /*
2154 * Firmware currently using resource (fwmask) or other software
2155 * thread currently using resource (swmask)
2156 */
2157 ixgbe_release_eeprom_semaphore(hw);
2158 msleep(5);
2159 timeout--;
2160 }
2162 if (!timeout) {
2163 hw_dbg(hw, "Driver can't access resource, GSSR timeout.\n");
2164 return IXGBE_ERR_SWFW_SYNC;
2165 }
2167 gssr |= swmask;
2168 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2170 ixgbe_release_eeprom_semaphore(hw);
2171 return 0;
2172 }
2174 /**
2175 * ixgbe_release_swfw_sync - Release SWFW semaphore
2176 * @hw: pointer to hardware structure
2177 * @mask: Mask to specify which semaphore to release
2179 * Releases the SWFW semaphore through the GSSR register for the specified
2180 * function (CSR, PHY0, PHY1, EEPROM, Flash)
2181 **/
2182 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2183 {
2184 u32 gssr;
2185 u32 swmask = mask;
2187 ixgbe_get_eeprom_semaphore(hw);
2189 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2190 gssr &= ~swmask;
2191 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2193 ixgbe_release_eeprom_semaphore(hw);
2194 }
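/*
 * Example (illustrative sketch): acquire and release are meant to be
 * paired around any access to a resource shared with firmware.  The mask
 * names (e.g. IXGBE_GSSR_PHY0_SM) come from ixgbe_type.h; pick the one
 * matching the resource being touched:
 *
 *	if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_PHY0_SM) != 0)
 *		return IXGBE_ERR_SWFW_SYNC;
 *	... access PHY 0 registers ...
 *	ixgbe_release_swfw_sync(hw, IXGBE_GSSR_PHY0_SM);
 */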
2197 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
2198 * @hw: pointer to hardware structure
2199 * @regval: register value to write to RXCTRL
2201 * Enables the Rx DMA unit
2203 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
2205 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
2207 return 0;
2211 * ixgbe_blink_led_start_generic - Blink LED based on index.
2212 * @hw: pointer to hardware structure
2213 * @index: led number to blink
2215 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
2217 ixgbe_link_speed speed = 0;
2218 bool link_up = false;
2219 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2220 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2222 /*
2223 * Link must be up to auto-blink the LEDs;
2224 * Force it if link is down.
2225 */
2226 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2228 if (!link_up) {
2229 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2230 autoc_reg |= IXGBE_AUTOC_FLU;
2231 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2232 msleep(10);
2235 led_reg &= ~IXGBE_LED_MODE_MASK(index);
2236 led_reg |= IXGBE_LED_BLINK(index);
2237 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2238 IXGBE_WRITE_FLUSH(hw);
2240 return 0;
2244 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
2245 * @hw: pointer to hardware structure
2246 * @index: led number to stop blinking
2248 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
2250 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2251 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2253 autoc_reg &= ~IXGBE_AUTOC_FLU;
2254 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2255 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2257 led_reg &= ~IXGBE_LED_MODE_MASK(index);
2258 led_reg &= ~IXGBE_LED_BLINK(index);
2259 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
2260 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2261 IXGBE_WRITE_FLUSH(hw);
2263 return 0;
2264 }
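/*
 * Example (illustrative, assuming these generics are wired up as the
 * MAC's blink_led_start/blink_led_stop ops): an identify-port style
 * caller blinks LED 0 for a couple of seconds and then restores it:
 *
 *	hw->mac.ops.blink_led_start(hw, 0);
 *	msleep(2000);
 *	hw->mac.ops.blink_led_stop(hw, 0);
 *
 * ixgbe_blink_led_stop_generic() leaves the LED in link/activity mode.
 */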
2267 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
2268 * @hw: pointer to hardware structure
2269 * @san_mac_offset: SAN MAC address offset
2271 * This function will read the EEPROM location for the SAN MAC address
2272 * pointer, and returns the value at that location. This is used in both
2273 * get and set mac_addr routines.
2275 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
2276 u16 *san_mac_offset)
2279 * First read the EEPROM pointer to see if the MAC addresses are
2280 * available.
2282 hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
2284 return 0;
2288 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
2289 * @hw: pointer to hardware structure
2290 * @san_mac_addr: SAN MAC address
2292 * Reads the SAN MAC address from the EEPROM, if it's available. This is
2293 * per-port, so set_lan_id() must be called before reading the addresses.
2294 * set_lan_id() is called by identify_sfp(), but this cannot be relied
2295 * upon for non-SFP connections, so we must call it here.
2297 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
2299 u16 san_mac_data, san_mac_offset;
2300 u8 i;
2302 /*
2303 * First read the EEPROM pointer to see if the MAC addresses are
2304 * available. If they're not, no point in calling set_lan_id() here.
2305 */
2306 ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
2308 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
2309 /*
2310 * No addresses available in this EEPROM. It's not an
2311 * error though, so just wipe the local address and return.
2312 */
2313 for (i = 0; i < 6; i++)
2314 san_mac_addr[i] = 0xFF;
2316 goto san_mac_addr_out;
2319 /* make sure we know which port we need to program */
2320 hw->mac.ops.set_lan_id(hw);
2321 /* apply the port offset to the address offset */
2322 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
2323 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
2324 for (i = 0; i < 3; i++) {
2325 hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
2326 san_mac_addr[i * 2] = (u8)(san_mac_data);
2327 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
2328 san_mac_offset++;
2331 san_mac_addr_out:
2332 return 0;
2333 }
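/*
 * Example (illustrative): when no SAN MAC is provisioned, the routine
 * returns an address of all 0xFF bytes rather than an error, so a caller
 * can screen the result, e.g. with is_valid_ether_addr() from
 * <linux/etherdevice.h>:
 *
 *	u8 san_mac[6];
 *
 *	ixgbe_get_san_mac_addr_generic(hw, san_mac);
 *	if (is_valid_ether_addr(san_mac))
 *		... program or export the SAN MAC ...
 */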
2336 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
2337 * @hw: pointer to hardware structure
2339 * Read PCIe configuration space, and get the MSI-X vector count from
2340 * the capabilities table.
2342 u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
2344 struct ixgbe_adapter *adapter = hw->back;
2345 u16 msix_count;
2346 pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82599_CAPS,
2347 &msix_count);
2348 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
2350 /* MSI-X count is zero-based in HW, so increment to give proper value */
2351 msix_count++;
2353 return msix_count;
2354 }
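/*
 * Example (illustrative; num_requested is a stand-in for whatever vector
 * count the caller computed): a caller sizing its MSI-X table might clamp
 * its request to the advertised capability:
 *
 *	u32 max_vectors = ixgbe_get_pcie_msix_count_generic(hw);
 *
 *	if (num_requested > max_vectors)
 *		num_requested = max_vectors;
 *
 * The increment above converts the zero-based hardware field into an
 * actual vector count.
 */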
2357 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
2358 * @hw: pointer to hardware struct
2359 * @rar: receive address register index to disassociate
2360 * @vmdq: VMDq pool index to remove from the rar
2362 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
2364 u32 mpsar_lo, mpsar_hi;
2365 u32 rar_entries = hw->mac.num_rar_entries;
2367 if (rar < rar_entries) {
2368 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2369 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2371 if (!mpsar_lo && !mpsar_hi)
2372 goto done;
2374 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
2375 if (mpsar_lo) {
2376 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
2377 mpsar_lo = 0;
2379 if (mpsar_hi) {
2380 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
2381 mpsar_hi = 0;
2383 } else if (vmdq < 32) {
2384 mpsar_lo &= ~(1 << vmdq);
2385 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
2386 } else {
2387 mpsar_hi &= ~(1 << (vmdq - 32));
2388 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
2391 /* was that the last pool using this rar? */
2392 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
2393 hw->mac.ops.clear_rar(hw, rar);
2394 } else {
2395 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
2398 done:
2399 return 0;
2403 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
2404 * @hw: pointer to hardware struct
2405 * @rar: receive address register index to associate with a VMDq index
2406 * @vmdq: VMDq pool index
2408 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
2410 u32 mpsar;
2411 u32 rar_entries = hw->mac.num_rar_entries;
2413 if (rar < rar_entries) {
2414 if (vmdq < 32) {
2415 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2416 mpsar |= 1 << vmdq;
2417 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
2418 } else {
2419 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2420 mpsar |= 1 << (vmdq - 32);
2421 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
2423 } else {
2424 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
2426 return 0;
2427 }
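/*
 * Example (illustrative; rar stands for a valid RAR index): RAR-to-pool
 * mappings set here are undone with ixgbe_clear_vmdq_generic() above,
 * either per pool or all at once:
 *
 *	ixgbe_set_vmdq_generic(hw, rar, 3);	set pool 3's bit
 *	ixgbe_clear_vmdq_generic(hw, rar, 3);	clear pool 3's bit
 *	ixgbe_clear_vmdq_generic(hw, rar, IXGBE_CLEAR_VMDQ_ALL);
 *
 * IXGBE_CLEAR_VMDQ_ALL wipes both MPSAR halves for the RAR, and the RAR
 * itself is cleared once no pool references it (except RAR 0).
 */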
2430 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
2431 * @hw: pointer to hardware structure
2433 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
2435 int i;
2438 for (i = 0; i < 128; i++)
2439 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
2441 return 0;
2445 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
2446 * @hw: pointer to hardware structure
2447 * @vlan: VLAN id to write to VLAN filter
2449 * return the VLVF index where this VLAN id should be placed
2452 s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
2454 u32 bits = 0;
2455 u32 first_empty_slot = 0;
2456 s32 regindex;
2458 /* short cut the special case */
2459 if (vlan == 0)
2460 return 0;
2462 /*
2463 * Search for the vlan id in the VLVF entries. Save off the first empty
2464 * slot found along the way
2465 */
2466 for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
2467 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
2468 if (!bits && !(first_empty_slot))
2469 first_empty_slot = regindex;
2470 else if ((bits & 0x0FFF) == vlan)
2471 break;
2472 }
2474 /*
2475 * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
2476 * in the VLVF. Else use the first empty VLVF register for this
2477 * vlan id.
2478 */
2479 if (regindex >= IXGBE_VLVF_ENTRIES) {
2480 if (first_empty_slot)
2481 regindex = first_empty_slot;
2482 else {
2483 hw_dbg(hw, "No space in VLVF.\n");
2484 regindex = IXGBE_ERR_NO_SPACE;
2485 }
2486 }
2488 return regindex;
2489 }
2492 * ixgbe_set_vfta_generic - Set VLAN filter table
2493 * @hw: pointer to hardware structure
2494 * @vlan: VLAN id to write to VLAN filter
2495 * @vind: VMDq output index that maps queue to VLAN id in VLVFB
2496 * @vlan_on: boolean flag to turn on/off VLAN in VLVF
2498 * Turn on/off specified VLAN in the VLAN filter table.
2500 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
2501 bool vlan_on)
2503 s32 regindex;
2504 u32 bitindex;
2505 u32 vfta;
2506 u32 bits;
2507 u32 vt;
2508 u32 targetbit;
2509 bool vfta_changed = false;
2511 if (vlan > 4095)
2512 return IXGBE_ERR_PARAM;
2514 /*
2515 * this is a 2 part operation - first the VFTA, then the
2516 * VLVF and VLVFB if VT Mode is set
2517 * We don't write the VFTA until we know the VLVF part succeeded.
2518 */
2520 /* Part 1
2521 * The VFTA is a bitstring made up of 128 32-bit registers
2522 * that enable the particular VLAN id, much like the MTA:
2523 * bits[11-5]: which register
2524 * bits[4-0]: which bit in the register
2525 */
2526 regindex = (vlan >> 5) & 0x7F;
2527 bitindex = vlan & 0x1F;
2528 targetbit = (1 << bitindex);
2529 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
2531 if (vlan_on) {
2532 if (!(vfta & targetbit)) {
2533 vfta |= targetbit;
2534 vfta_changed = true;
2536 } else {
2537 if ((vfta & targetbit)) {
2538 vfta &= ~targetbit;
2539 vfta_changed = true;
2543 /* Part 2
2544 * If VT Mode is set
2545 * Either vlan_on
2546 * make sure the vlan is in VLVF
2547 * set the vind bit in the matching VLVFB
2548 * Or !vlan_on
2549 * clear the pool bit and possibly the vind
2550 */
2551 vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2552 if (vt & IXGBE_VT_CTL_VT_ENABLE) {
2553 s32 vlvf_index;
2555 vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
2556 if (vlvf_index < 0)
2557 return vlvf_index;
2559 if (vlan_on) {
2560 /* set the pool bit */
2561 if (vind < 32) {
2562 bits = IXGBE_READ_REG(hw,
2563 IXGBE_VLVFB(vlvf_index*2));
2564 bits |= (1 << vind);
2565 IXGBE_WRITE_REG(hw,
2566 IXGBE_VLVFB(vlvf_index*2),
2567 bits);
2568 } else {
2569 bits = IXGBE_READ_REG(hw,
2570 IXGBE_VLVFB((vlvf_index*2)+1));
2571 bits |= (1 << (vind-32));
2572 IXGBE_WRITE_REG(hw,
2573 IXGBE_VLVFB((vlvf_index*2)+1),
2574 bits);
2576 } else {
2577 /* clear the pool bit */
2578 if (vind < 32) {
2579 bits = IXGBE_READ_REG(hw,
2580 IXGBE_VLVFB(vlvf_index*2));
2581 bits &= ~(1 << vind);
2582 IXGBE_WRITE_REG(hw,
2583 IXGBE_VLVFB(vlvf_index*2),
2584 bits);
2585 bits |= IXGBE_READ_REG(hw,
2586 IXGBE_VLVFB((vlvf_index*2)+1));
2587 } else {
2588 bits = IXGBE_READ_REG(hw,
2589 IXGBE_VLVFB((vlvf_index*2)+1));
2590 bits &= ~(1 << (vind-32));
2591 IXGBE_WRITE_REG(hw,
2592 IXGBE_VLVFB((vlvf_index*2)+1),
2593 bits);
2594 bits |= IXGBE_READ_REG(hw,
2595 IXGBE_VLVFB(vlvf_index*2));
2596 }
2597 }
2599 /*
2600 * If there are still bits set in the VLVFB registers
2601 * for the VLAN ID indicated we need to see if the
2602 * caller is requesting that we clear the VFTA entry bit.
2603 * If the caller has requested that we clear the VFTA
2604 * entry bit but there are still pools/VFs using this VLAN
2605 * ID entry then ignore the request. We're not worried
2606 * about the case where we're turning the VFTA VLAN ID
2607 * entry bit on, only when requested to turn it off as
2608 * there may be multiple pools and/or VFs using the
2609 * VLAN ID entry. In that case we cannot clear the
2610 * VFTA bit until all pools/VFs using that VLAN ID have also
2611 * been cleared. This will be indicated by "bits" being
2612 * zero.
2613 */
2614 if (bits) {
2615 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
2616 (IXGBE_VLVF_VIEN | vlan));
2617 if (!vlan_on) {
2618 /* someone wants to clear the vfta entry
2619 * but some pools/VFs are still using it.
2620 * Ignore it. */
2621 vfta_changed = false;
2624 else
2625 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
2628 if (vfta_changed)
2629 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
2631 return 0;
2632 }
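/*
 * Example (illustrative): enabling VLAN 100 for pool 3, and the bit math
 * used above for the VFTA lookup:
 *
 *	ixgbe_set_vfta_generic(hw, 100, 3, true);
 *
 * For vlan = 100: regindex = (100 >> 5) & 0x7F = 3 and bitindex =
 * 100 & 0x1F = 4, so bit 4 of VFTA[3] is set.  With VT mode enabled the
 * VLAN is also placed in a VLVF slot and pool 3's bit is set in the
 * matching VLVFB register.
 */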
2635 * ixgbe_clear_vfta_generic - Clear VLAN filter table
2636 * @hw: pointer to hardware structure
2638 * Clears the VLAN filter table, and the VMDq index associated with the filter
2640 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
2642 u32 offset;
2644 for (offset = 0; offset < hw->mac.vft_size; offset++)
2645 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
2647 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
2648 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
2649 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0);
2650 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0);
2653 return 0;
2657 * ixgbe_check_mac_link_generic - Determine link and speed status
2658 * @hw: pointer to hardware structure
2659 * @speed: pointer to link speed
2660 * @link_up: true when link is up
2661 * @link_up_wait_to_complete: bool used to wait for link up or not
2663 * Reads the links register to determine if link is up and the current speed
2665 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
2666 bool *link_up, bool link_up_wait_to_complete)
2668 u32 links_reg;
2669 u32 i;
2671 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
2672 if (link_up_wait_to_complete) {
2673 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
2674 if (links_reg & IXGBE_LINKS_UP) {
2675 *link_up = true;
2676 break;
2677 } else {
2678 *link_up = false;
2680 msleep(100);
2681 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
2683 } else {
2684 if (links_reg & IXGBE_LINKS_UP)
2685 *link_up = true;
2686 else
2687 *link_up = false;
2690 if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
2691 IXGBE_LINKS_SPEED_10G_82599)
2692 *speed = IXGBE_LINK_SPEED_10GB_FULL;
2693 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
2694 IXGBE_LINKS_SPEED_1G_82599)
2695 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2696 else
2697 *speed = IXGBE_LINK_SPEED_100_FULL;
2699 /* if link is down, zero out the current_mode */
2700 if (!*link_up) {
2701 hw->fc.current_mode = ixgbe_fc_none;
2702 hw->fc.fc_was_autonegged = false;
2703 }
2705 return 0;
2706 }
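/*
 * Example (illustrative): a caller that just wants the current state
 * passes false for the wait flag and reads back the speed and link state:
 *
 *	ixgbe_link_speed speed;
 *	bool link_up;
 *
 *	ixgbe_check_mac_link_generic(hw, &speed, &link_up, false);
 *	if (link_up && speed == IXGBE_LINK_SPEED_10GB_FULL)
 *		... link is up at 10G ...
 *
 * With link_up_wait_to_complete set, the routine instead polls the LINKS
 * register for up to IXGBE_LINK_UP_TIME iterations, sleeping 100 ms each.
 */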