igb: move alloc_failed and csum_err stats into per rx-ring stat
drivers/net/igb/igb_main.c
/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"
#define DRV_VERSION "1.3.16-k2"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";
static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static struct pci_device_id igb_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
void igb_update_stats(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_setup_tctl(struct igb_adapter *);
static void igb_setup_rctl(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *,
					   struct net_device *,
					   struct igb_ring *);
static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
				      struct net_device *);
static struct net_device_stats *igb_get_stats(struct net_device *);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static bool igb_clean_tx_irq(struct igb_q_vector *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static int igb_rcv_msg_from_vf(struct igb_adapter *, u32);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *adapter, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static inline void igb_set_vmolr(struct e1000_hw *hw, int vfn)
{
	u32 reg_data;

	reg_data = rd32(E1000_VMOLR(vfn));
	reg_data |= E1000_VMOLR_BAM |	 /* Accept broadcast */
		    E1000_VMOLR_ROMPE |  /* Accept packets matched in MTA */
		    E1000_VMOLR_AUPE |   /* Accept untagged packets */
		    E1000_VMOLR_STRVLAN; /* Strip vlan tags */
	wr32(E1000_VMOLR(vfn), reg_data);
}
static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
				   int vfn)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/* if it isn't the PF check to see if VFs are enabled and
	 * increase the size to support vlan tags */
	if (vfn < adapter->vfs_allocated_count &&
	    adapter->vf_data[vfn].vlans_enabled)
		size += VLAN_TAG_SIZE;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr &= ~E1000_VMOLR_RLPML_MASK;
	vmolr |= size | E1000_VMOLR_LPE;
	wr32(E1000_VMOLR(vfn), vmolr);

	return 0;
}
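
/*
 * Illustrative example (added for clarity, not from the original source):
 * VLAN_TAG_SIZE is 4 bytes, so a VF with VLANs enabled that requests a
 * 1522-byte limit ends up with RLPML programmed to 1526, with
 * E1000_VMOLR_LPE set so frames up to that size are accepted.
 */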
#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = igb_suspend,
	.resume   = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

static int global_quad_port_a; /* global quad port a indication */

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/**
 * Scale the NIC clock cycle by a large factor so that
 * relatively small clock corrections can be added or
 * subtracted at each clock tick. The drawbacks of a
 * large factor are a) that the clock register overflows
 * more quickly (not such a big deal) and b) that the
 * increment per tick has to fit into 24 bits.
 *
 * Note that
 *   TIMINCA = IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS *
 *             IGB_TSYNC_SCALE
 *   TIMINCA += TIMINCA * adjustment [ppm] / 1e9
 *
 * The base scale factor is intentionally a power of two
 * so that the division in %struct timecounter can be done with
 * a shift.
 */
#define IGB_TSYNC_SHIFT (19)
#define IGB_TSYNC_SCALE (1<<IGB_TSYNC_SHIFT)

/**
 * The duration of one clock cycle of the NIC.
 *
 * @todo This hard-coded value is part of the specification and might change
 * in future hardware revisions. Add revision check.
 */
#define IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS 16

#if (IGB_TSYNC_SCALE * IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS) >= (1<<24)
# error IGB_TSYNC_SCALE and/or IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS are too large to fit into TIMINCA
#endif
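
/*
 * Worked example (illustrative, not in the original source): with
 * IGB_TSYNC_SHIFT = 19 the scale is 1 << 19 = 524288, so the increment
 * programmed into TIMINCA is 16 * 524288 = 8388608 = 0x800000, which
 * stays below the 24-bit limit of 1 << 24 = 16777216 checked above.
 */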
/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
	struct igb_adapter *adapter =
		container_of(tc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp;

	stamp =  rd32(E1000_SYSTIML);
	stamp |= (u64)rd32(E1000_SYSTIMH) << 32ULL;

	return stamp;
}
#ifdef DEBUG
/**
 * igb_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *igb_get_hw_dev_name(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

/**
 * igb_get_time_str - format current NIC and system time as string
 */
static char *igb_get_time_str(struct igb_adapter *adapter,
			      char buffer[160])
{
	cycle_t hw = adapter->cycles.read(&adapter->cycles);
	struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
	struct timespec sys;
	struct timespec delta;
	getnstimeofday(&sys);

	delta = timespec_sub(nic, sys);

	sprintf(buffer,
		"HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
		hw,
		(long)nic.tv_sec, nic.tv_nsec,
		(long)sys.tv_sec, sys.tv_nsec,
		(long)delta.tv_sec, delta.tv_nsec);

	return buffer;
}
#endif
/**
 * igb_desc_unused - calculate if we have unused descriptors
 **/
static int igb_desc_unused(struct igb_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
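
/*
 * Worked example (illustrative): for a ring with count = 256,
 * next_to_use = 10 and next_to_clean = 4, the wrap branch gives
 * 256 + 4 - 10 - 1 = 249 unused descriptors; one slot always stays
 * unused so that next_to_use != next_to_clean.
 */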
/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	printk(KERN_INFO "%s\n", igb_copyright);

	global_quad_port_a = 0;

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif

	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);
/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);
#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
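/*
 * Illustrative mapping (added for clarity, not in the original source):
 * Q_IDX_82576(0) = 0, Q_IDX_82576(1) = 8, Q_IDX_82576(2) = 1,
 * Q_IDX_82576(3) = 9, ... so consecutive software queues land on the
 * interleaved 82576 register offsets described below.
 */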
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].reg_idx = rbase_offset +
			                              Q_IDX_82576(i);
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].reg_idx = rbase_offset +
			                              Q_IDX_82576(i);
		break;
	case e1000_82575:
	default:
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].reg_idx = i;
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].reg_idx = i;
		break;
	}
}
static void igb_free_queues(struct igb_adapter *adapter)
{
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	adapter->tx_ring = NULL;
	adapter->rx_ring = NULL;

	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}
/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct igb_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct igb_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = &(adapter->tx_ring[i]);
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->pdev = adapter->pdev;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &(adapter->rx_ring[i]);
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->pdev = adapter->pdev;
		ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	}

	igb_cache_ring_register(adapter);

	return 0;

err:
	igb_free_queues(adapter);

	return -ENOMEM;
}
#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	u32 msixbm = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;

	if (q_vector->rx_ring)
		rx_queue = q_vector->rx_ring->reg_idx;
	if (q_vector->tx_ring)
		tx_queue = q_vector->tx_ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/* 82576 uses a table-based method for assigning vectors.
		   Each queue has a single entry in the table to which we write
		   a vector number along with a "valid" bit.  Sadly, the layout
		   of the table is somewhat counterintuitive. */
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue < 8) {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			} else {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue < 8) {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			} else {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}
}
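
/*
 * Illustrative summary of the 82576 IVAR layout implied by the shifts
 * above (added for clarity, not a comment from the original source):
 * IVAR0 entry (queue & 0x7) packs four vector assignments per 32-bit
 * register - Rx queues 0-7 in bits 7:0, Tx queues 0-7 in bits 15:8,
 * Rx queues 8-15 in bits 23:16, and Tx queues 8-15 in bits 31:24,
 * each field carrying its own valid bit.
 */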
/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
		           E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
		     E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		igb_assign_vector(q_vector, vector++);
		adapter->eims_enable_mask |= q_vector->eims_value;
	}

	wrfl();
}
/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  &igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx_ring && q_vector->tx_ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
			        q_vector->rx_ring->queue_index);
		else if (q_vector->tx_ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
			        q_vector->tx_ring->queue_index);
		else if (q_vector->rx_ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
			        q_vector->rx_ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);
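		/* These names are what shows up in /proc/interrupts, e.g.
		 * "eth0-TxRx-0" for a combined vector (illustrative comment,
		 * not in the original source). */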

		err = request_irq(adapter->msix_entries[vector].vector,
		                  &igb_msix_ring, 0, q_vector->name,
		                  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}
static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}
/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}
/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}
/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
	adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
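	/* Illustrative count (not in the original source): on a 4-CPU
	 * system with 4 Rx and 4 Tx queues this requests 9 MSI-X vectors,
	 * i.e. 8 queue vectors plus 1 for link status changes. */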
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced Tx Queue count. */
	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
	return;
}
/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_shift = (hw->mac.type == e1000_82575) ? 16 : 0;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		q_vector->set_itr = 1;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	return 0;

err_out:
	while (v_idx) {
		v_idx--;
		q_vector = adapter->q_vector[v_idx];
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
		adapter->q_vector[v_idx] = NULL;
	}
	return -ENOMEM;
}
static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector;

	q_vector = adapter->q_vector[v_idx];
	q_vector->rx_ring = &adapter->rx_ring[ring_idx];
	q_vector->rx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->itr;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector;

	q_vector = adapter->q_vector[v_idx];
	q_vector->tx_ring = &adapter->tx_ring[ring_idx];
	q_vector->tx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->itr;
}
/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;

	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;

	if (adapter->num_q_vectors >=
	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		for (i = 0; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (i < adapter->num_tx_queues)
				igb_map_tx_ring_to_vector(adapter, i, v_idx);
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		}
		for (; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	}
	return 0;
}
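
/*
 * Illustrative mapping (added for clarity, not in the original source):
 * with 4 Rx and 4 Tx queues and 8 q_vectors, each ring gets its own
 * vector; with only 4 q_vectors the else branch above pairs tx_ring[i]
 * and rx_ring[i] onto the same vector i.
 */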
/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	igb_set_interrupt_capability(adapter);

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	err = igb_alloc_queues(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = igb_map_ring_to_vector(adapter);
	if (err) {
		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
		goto err_map_queues;
	}

	return 0;
err_map_queues:
	igb_free_queues(adapter);
err_alloc_queues:
	igb_free_q_vectors(adapter);
err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}
/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	int err = 0;

	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_clear_interrupt_scheme(adapter);
		if (!pci_enable_msi(adapter->pdev))
			adapter->flags |= IGB_FLAG_HAS_MSI;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_tx_queues = 1;
		adapter->num_rx_queues = 1;
		adapter->num_q_vectors = 1;
		err = igb_alloc_q_vectors(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for vectors\n");
			goto request_done;
		}
		err = igb_alloc_queues(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for queues\n");
			igb_free_q_vectors(adapter);
			goto request_done;
		}
		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
	} else {
		switch (hw->mac.type) {
		case e1000_82575:
			wr32(E1000_MSIXBM(0),
			     (E1000_EICR_RX_QUEUE0 |
			      E1000_EICR_TX_QUEUE0 |
			      E1000_EIMS_OTHER));
			break;
		case e1000_82576:
			wr32(E1000_IVAR0, E1000_IVAR_VALID);
			break;
		default:
			break;
		}
	}

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
				  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
			  netdev->name, adapter);

	if (err)
		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}
static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct igb_q_vector *q_vector = adapter->q_vector[i];
			free_irq(adapter->msix_entries[vector++].vector,
			         q_vector);
		}
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}
/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	synchronize_irq(adapter->pdev->irq);
}
/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count)
			wr32(E1000_MBVFIMR, 0xFF);
		wr32(E1000_IMS, (E1000_IMS_LSC | E1000_IMS_VMMB |
				 E1000_IMS_DOUTSYNC));
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK);
		wr32(E1000_IAM, IMS_ENABLE_MASK);
	}
}
static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;
	if (adapter->vlgrp) {
		if (!vlan_group_get_device(adapter->vlgrp, vid)) {
			if (adapter->hw.mng_cookie.status &
			    E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
				igb_vlan_rx_add_vid(netdev, vid);
				adapter->mng_vlan_id = vid;
			} else
				adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;

			if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
			    (vid != old_vid) &&
			    !vlan_group_get_device(adapter->vlgrp, old_vid))
				igb_vlan_rx_kill_vid(netdev, old_vid);
		} else
			adapter->mng_vlan_id = vid;
	}
}
/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}
/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);

	igb_restore_vlan(adapter);

	igb_setup_tctl(adapter);
	igb_setup_rctl(adapter);

	igb_configure_tx(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &adapter->rx_ring[i];
		igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
	}

	adapter->tx_queue_len = netdev->tx_queue_len;
}
/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_enable(&q_vector->napi);
	}
	if (adapter->msix_entries)
		igb_configure_msix(adapter);

	igb_vmm_control(adapter);
	igb_set_vmolr(hw, adapter->vfs_allocated_count);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	netif_tx_start_all_queues(adapter->netdev);

	/* Fire a link change interrupt to start the watchdog. */
	wr32(E1000_ICS, E1000_ICS_LSC);
	return 0;
}
void igb_down(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_disable(&q_vector->napi);
	}

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);

	/* record the stats before reset*/
	igb_update_stats(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA

	/* since we reset the hardware DCA settings were cleared */
	igb_setup_dca(adapter);
#endif
}
void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}
void igb_reset(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */
	switch (mac->type) {
	case e1000_82576:
		pba = E1000_PBA_64K;
		break;
	case e1000_82575:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    (mac->type < e1000_82576)) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = rd32(E1000_PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it */
		min_tx_space = (adapter->max_frame_size +
				sizeof(union e1000_adv_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;
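		/* Worked numbers (illustrative, not in the original source),
		 * assuming a 1522-byte max frame and 16-byte advanced Tx
		 * descriptors: min_tx_space = (1522 + 16 - 4) * 2 = 3068,
		 * rounded to 4096 -> 4 KB; min_rx_space = 1522, rounded to
		 * 2048 -> 2 KB. */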

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* if short on rx space, rx wins and must trump tx
			 * adjustment */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
		wr32(E1000_PBA, pba);
	}

	/* flow control settings */
	/* The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, or
	 * - the full Rx FIFO size minus one full frame */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - 2 * adapter->max_frame_size));

	if (mac->type < e1000_82576) {
		fc->high_water = hwm & 0xFFF8;	/* 8-byte granularity */
		fc->low_water = fc->high_water - 8;
	} else {
		fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
		fc->low_water = fc->high_water - 16;
	}
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	/* disable receive for all VFs and wait one second */
	if (adapter->vfs_allocated_count) {
		int i;
		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
			adapter->vf_data[i].clear_to_send = false;

		/* ping all the active vfs to let them know we are going down */
		igb_ping_all_vfs(adapter);

		/* disable transmits and receives */
		wr32(E1000_VFRE, 0);
		wr32(E1000_VFTE, 0);
	}

	/* Allow time for pending master requests to run */
	adapter->hw.mac.ops.reset_hw(&adapter->hw);
	wr32(E1000_WUC, 0);

	if (adapter->hw.mac.ops.init_hw(&adapter->hw))
		dev_err(&adapter->pdev->dev, "Hardware Error\n");

	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

	igb_reset_adaptive(&adapter->hw);
	igb_get_phy_info(&adapter->hw);
}
static const struct net_device_ops igb_netdev_ops = {
	.ndo_open		= igb_open,
	.ndo_stop		= igb_close,
	.ndo_start_xmit		= igb_xmit_frame_adv,
	.ndo_get_stats		= igb_get_stats,
	.ndo_set_rx_mode	= igb_set_rx_mode,
	.ndo_set_multicast_list	= igb_set_rx_mode,
	.ndo_set_mac_address	= igb_set_mac,
	.ndo_change_mtu		= igb_change_mtu,
	.ndo_do_ioctl		= igb_ioctl,
	.ndo_tx_timeout		= igb_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= igb_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igb_netpoll,
#endif
};
/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igb_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igb_adapter *adapter;
	struct e1000_hw *hw;
	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	int err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 eeprom_apme_mask = IGB_EEPROM_APME;
	u32 part_num;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
	                                   IORESOURCE_MEM),
	                                   igb_driver_name);
	if (err)
		goto err_pci_reg;

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
	                           IGB_ABS_MAX_TX_QUEUES);
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	hw->hw_addr = ioremap(mmio_start, mmio_len);
	if (!hw->hw_addr)
		goto err_ioremap;

	netdev->netdev_ops = &igb_netdev_ops;
	igb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* setup the private structure */
	hw->back = adapter;
	/* Copy the default MAC, PHY and NVM function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_sw_init;

#ifdef CONFIG_PCI_IOV
	/* since iov functionality isn't critical to base device function we
	 * can accept failure.  If it fails we don't allow iov to be enabled */
	if (hw->mac.type == e1000_82576) {
		/* 82576 supports a maximum of 7 VFs in addition to the PF */
		unsigned int num_vfs = (max_vfs > 7) ? 7 : max_vfs;
		int i;
		unsigned char mac_addr[ETH_ALEN];

		if (num_vfs) {
			adapter->vf_data = kcalloc(num_vfs,
						sizeof(struct vf_data_storage),
						GFP_KERNEL);
			if (!adapter->vf_data) {
				dev_err(&pdev->dev,
					"Could not allocate VF private data - "
					"IOV enable failed\n");
			} else {
				err = pci_enable_sriov(pdev, num_vfs);
				if (!err) {
					adapter->vfs_allocated_count = num_vfs;
					dev_info(&pdev->dev,
						 "%d vfs allocated\n",
						 num_vfs);
					for (i = 0;
					     i < adapter->vfs_allocated_count;
					     i++) {
						random_ether_addr(mac_addr);
						igb_set_vf_mac(adapter, i,
						               mac_addr);
					}
				} else {
					kfree(adapter->vf_data);
					adapter->vf_data = NULL;
				}
			}
		}
	}

#endif
	/* setup the private structure */
	err = igb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	igb_get_bus_info_pcie(hw);

	/* set flags */
	switch (hw->mac.type) {
	case e1000_82575:
		adapter->flags |= IGB_FLAG_NEED_CTX_IDX;
		break;
	case e1000_82576:
	default:
		break;
	}

	hw->phy.autoneg_wait_to_complete = false;
	hw->mac.adaptive_ifs = true;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = AUTO_ALL_MODES;
		hw->phy.disable_polarity_correction = false;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	if (igb_check_reset_block(hw))
		dev_info(&pdev->dev,
			"PHY reset is blocked due to SOL/IDER session.\n");

	netdev->features = NETIF_F_SG |
			   NETIF_F_IP_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;

	netdev->features |= NETIF_F_GRO;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	if (adapter->hw.mac.type == e1000_82576)
		netdev->features |= NETIF_F_SCTP_CSUM;

	adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state */
	hw->mac.ops.reset_hw(hw);

	/* make sure the NVM is good */
	if (igb_validate_nvm_checksum(hw) < 0) {
		dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* copy the MAC address out of the NVM */
	if (hw->mac.ops.read_mac_addr(hw))
		dev_err(&pdev->dev, "NVM Read Error\n");

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	setup_timer(&adapter->watchdog_timer, &igb_watchdog,
	            (unsigned long) adapter);
	setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
	            (unsigned long) adapter);

	INIT_WORK(&adapter->reset_task, igb_reset_task);
	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0x2f;

	hw->fc.requested_mode = e1000_fc_default;
	hw->fc.current_mode = e1000_fc_default;

	adapter->itr_setting = IGB_DEFAULT_ITR;
	adapter->itr = IGB_START_ITR;

	igb_validate_mdi_setting(hw);

	/* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	if (hw->bus.func == 0)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
	else if (hw->bus.func == 1)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);

	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases where
	 * the eeprom may be wrong or the board simply won't support wake on
	 * lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82576_QUAD_COPPER:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* reset the hardware with the new settings */
	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IGB_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IGB_FLAG_DCA_ENABLED;
		dev_info(&pdev->dev, "DCA enabled\n");
		igb_setup_dca(adapter);
	}
#endif

	/*
	 * Initialize hardware timer: we keep it running just in case
	 * that some program needs it later on.
	 */
	memset(&adapter->cycles, 0, sizeof(adapter->cycles));
	adapter->cycles.read = igb_read_clock;
	adapter->cycles.mask = CLOCKSOURCE_MASK(64);
	adapter->cycles.mult = 1;
	adapter->cycles.shift = IGB_TSYNC_SHIFT;
	wr32(E1000_TIMINCA,
	     (1<<24) |
	     IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS * IGB_TSYNC_SCALE);
#if 0
	/*
	 * Avoid rollover while we initialize by resetting the time counter.
	 */
	wr32(E1000_SYSTIML, 0x00000000);
	wr32(E1000_SYSTIMH, 0x00000000);
#else
	/*
	 * Set registers so that rollover occurs soon to test this.
	 */
	wr32(E1000_SYSTIML, 0x00000000);
	wr32(E1000_SYSTIMH, 0xFF800000);
#endif
	wrfl();
	timecounter_init(&adapter->clock,
			 &adapter->cycles,
			 ktime_to_ns(ktime_get_real()));

	/*
	 * Synchronize our NIC clock against system wall clock. NIC
	 * time stamp reading requires ~3us per sample, each sample
	 * was pretty stable even under load => only require 10
	 * samples for each offset comparison.
	 */
	memset(&adapter->compare, 0, sizeof(adapter->compare));
	adapter->compare.source = &adapter->clock;
	adapter->compare.target = ktime_get_real;
	adapter->compare.num_samples = 10;
	timecompare_update(&adapter->compare, 0);

#ifdef DEBUG
	{
		char buffer[160];
		printk(KERN_DEBUG
			"igb: %s: hw %p initialized timer\n",
			igb_get_time_str(adapter, buffer),
			&adapter->hw);
	}
#endif

	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
	/* print bus type/speed/width info */
	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
		 netdev->name,
		 ((hw->bus.speed == e1000_bus_speed_2500)
		  ? "2.5Gb/s" : "unknown"),
		 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
		  (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
		  (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
		   "unknown"),
		 netdev->dev_addr);

	igb_read_part_num(hw, &part_num);
	dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
		 (part_num >> 8), (part_num & 0xff));

	dev_info(&pdev->dev,
		 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
		 adapter->msix_entries ? "MSI-X" :
		 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
		 adapter->num_rx_queues, adapter->num_tx_queues);

	return 0;

err_register:
	igb_release_hw_control(adapter);
err_eeprom:
	if (!igb_check_reset_block(hw))
		igb_reset_phy(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
err_sw_init:
	igb_clear_interrupt_scheme(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
	                             IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * igb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* flush_scheduled_work() may reschedule our watchdog task, so
	 * explicitly disable watchdog tasks from being rescheduled */
	set_bit(__IGB_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	flush_scheduled_work();

#ifdef CONFIG_IGB_DCA
	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
		dev_info(&pdev->dev, "DCA disabled\n");
		dca_remove_requester(&pdev->dev);
		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
	}
#endif

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	unregister_netdev(netdev);

	if (!igb_check_reset_block(&adapter->hw))
		igb_reset_phy(&adapter->hw);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PCI_IOV
	/* reclaim resources allocated to VFs */
	if (adapter->vf_data) {
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&pdev->dev, "IOV Disabled\n");
	}
#endif
	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
	                             IORESOURCE_MEM));

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
/**
 * igb_sw_init - Initialize general software structures (struct igb_adapter)
 * @adapter: board private structure to initialize
 *
 * igb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit igb_sw_init(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

	adapter->tx_ring_count = IGB_DEFAULT_TXD;
	adapter->rx_ring_count = IGB_DEFAULT_RXD;
	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	/* This call may decrease the number of queues depending on
	 * interrupt mode. */
	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igb_irq_disable(adapter);

	set_bit(__IGB_DOWN, &adapter->state);
	return 0;
}
1885 * igb_open - Called when a network interface is made active
1886 * @netdev: network interface device structure
1888 * Returns 0 on success, negative value on failure
1890 * The open entry point is called when a network interface is made
1891 * active by the system (IFF_UP). At this point all resources needed
1892 * for transmit and receive operations are allocated, the interrupt
1893 * handler is registered with the OS, the watchdog timer is started,
1894 * and the stack is notified that the interface is ready.
1896 static int igb_open(struct net_device *netdev)
1898 struct igb_adapter *adapter = netdev_priv(netdev);
1899 struct e1000_hw *hw = &adapter->hw;
1900 int err;
1901 int i;
1903 /* disallow open during test */
1904 if (test_bit(__IGB_TESTING, &adapter->state))
1905 return -EBUSY;
1907 netif_carrier_off(netdev);
1909 /* allocate transmit descriptors */
1910 err = igb_setup_all_tx_resources(adapter);
1911 if (err)
1912 goto err_setup_tx;
1914 /* allocate receive descriptors */
1915 err = igb_setup_all_rx_resources(adapter);
1916 if (err)
1917 goto err_setup_rx;
1919 /* e1000_power_up_phy(adapter); */
1921 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1922 if ((adapter->hw.mng_cookie.status &
1923 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
1924 igb_update_mng_vlan(adapter);
1926 /* before we allocate an interrupt, we must be ready to handle it.
1927 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1928 * as soon as we call request_irq(), so we have to set up our
1929 * clean_rx handler before we do so. */
1930 igb_configure(adapter);
1932 igb_vmm_control(adapter);
1933 igb_set_vmolr(hw, adapter->vfs_allocated_count);
1935 err = igb_request_irq(adapter);
1936 if (err)
1937 goto err_req_irq;
1939 /* From here on the code is the same as igb_up() */
1940 clear_bit(__IGB_DOWN, &adapter->state);
1942 for (i = 0; i < adapter->num_q_vectors; i++) {
1943 struct igb_q_vector *q_vector = adapter->q_vector[i];
1944 napi_enable(&q_vector->napi);
1947 /* Clear any pending interrupts. */
1948 rd32(E1000_ICR);
1950 igb_irq_enable(adapter);
1952 netif_tx_start_all_queues(netdev);
1954 /* Fire a link status change interrupt to start the watchdog. */
1955 wr32(E1000_ICS, E1000_ICS_LSC);
1957 return 0;
1959 err_req_irq:
1960 igb_release_hw_control(adapter);
1961 /* e1000_power_down_phy(adapter); */
1962 igb_free_all_rx_resources(adapter);
1963 err_setup_rx:
1964 igb_free_all_tx_resources(adapter);
1965 err_setup_tx:
1966 igb_reset(adapter);
1968 return err;
1972 * igb_close - Disables a network interface
1973 * @netdev: network interface device structure
1975 * Returns 0, this is not allowed to fail
1977 * The close entry point is called when an interface is de-activated
1978 * by the OS. The hardware is still under the driver's control, but
1979 * needs to be disabled. A global MAC reset is issued to stop the
1980 * hardware, and all transmit and receive resources are freed.
1982 static int igb_close(struct net_device *netdev)
1984 struct igb_adapter *adapter = netdev_priv(netdev);
1986 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
1987 igb_down(adapter);
1989 igb_free_irq(adapter);
1991 igb_free_all_tx_resources(adapter);
1992 igb_free_all_rx_resources(adapter);
1994 /* kill manageability vlan ID if supported, but not if a vlan with
1995 * the same ID is registered on the host OS (let 8021q kill it) */
1996 if ((adapter->hw.mng_cookie.status &
1997 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
1998 !(adapter->vlgrp &&
1999 vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
2000 igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
2002 return 0;
2006 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
2007 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2009 * Return 0 on success, negative on failure
2011 int igb_setup_tx_resources(struct igb_ring *tx_ring)
2013 struct pci_dev *pdev = tx_ring->pdev;
2014 int size;
2016 size = sizeof(struct igb_buffer) * tx_ring->count;
2017 tx_ring->buffer_info = vmalloc(size);
2018 if (!tx_ring->buffer_info)
2019 goto err;
2020 memset(tx_ring->buffer_info, 0, size);
2022 /* round up to nearest 4K */
2023 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
2024 tx_ring->size = ALIGN(tx_ring->size, 4096);
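/* Worked example for the rounding above (illustrative, assuming the
 * driver default of 256 descriptors and a 16-byte advanced descriptor):
 * 256 * 16 = 4096 bytes, already 4K-aligned, so ALIGN() is a no-op;
 * with 320 descriptors, 320 * 16 = 5120 and ALIGN(5120, 4096) = 8192.
 * For power-of-two a, ALIGN(x, a) == (x + a - 1) & ~(a - 1).
 */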
2026 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
2027 &tx_ring->dma);
2029 if (!tx_ring->desc)
2030 goto err;
2032 tx_ring->next_to_use = 0;
2033 tx_ring->next_to_clean = 0;
2034 return 0;
2036 err:
2037 vfree(tx_ring->buffer_info);
2038 dev_err(&pdev->dev,
2039 "Unable to allocate memory for the transmit descriptor ring\n");
2040 return -ENOMEM;
2044 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
2045 * (Descriptors) for all queues
2046 * @adapter: board private structure
2048 * Return 0 on success, negative on failure
2050 static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2052 int i, err = 0;
2053 int r_idx;
2055 for (i = 0; i < adapter->num_tx_queues; i++) {
2056 err = igb_setup_tx_resources(&adapter->tx_ring[i]);
2057 if (err) {
2058 dev_err(&adapter->pdev->dev,
2059 "Allocation for Tx Queue %u failed\n", i);
2060 for (i--; i >= 0; i--)
2061 igb_free_tx_resources(&adapter->tx_ring[i]);
2062 break;
2066 for (i = 0; i < IGB_MAX_TX_QUEUES; i++) {
2067 r_idx = i % adapter->num_tx_queues;
2068 adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
2070 return err;
2074 * igb_setup_tctl - configure the transmit control registers
2075 * @adapter: Board private structure
2077 static void igb_setup_tctl(struct igb_adapter *adapter)
2079 struct e1000_hw *hw = &adapter->hw;
2080 u32 tctl;
2082 /* disable queue 0 which is enabled by default on 82575 and 82576 */
2083 wr32(E1000_TXDCTL(0), 0);
2085 /* Program the Transmit Control Register */
2086 tctl = rd32(E1000_TCTL);
2087 tctl &= ~E1000_TCTL_CT;
2088 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2089 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2091 igb_config_collision_dist(hw);
2093 /* Enable transmits */
2094 tctl |= E1000_TCTL_EN;
2096 wr32(E1000_TCTL, tctl);
2100 * igb_configure_tx_ring - Configure transmit ring after Reset
2101 * @adapter: board private structure
2102 * @ring: tx ring to configure
2104 * Configure a transmit ring after a reset.
2106 static void igb_configure_tx_ring(struct igb_adapter *adapter,
2107 struct igb_ring *ring)
2109 struct e1000_hw *hw = &adapter->hw;
2110 u32 txdctl;
2111 u64 tdba = ring->dma;
2112 int reg_idx = ring->reg_idx;
2114 /* disable the queue */
2115 txdctl = rd32(E1000_TXDCTL(reg_idx));
2116 wr32(E1000_TXDCTL(reg_idx),
2117 txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
2118 wrfl();
2119 mdelay(10);
2121 wr32(E1000_TDLEN(reg_idx),
2122 ring->count * sizeof(union e1000_adv_tx_desc));
2123 wr32(E1000_TDBAL(reg_idx),
2124 tdba & 0x00000000ffffffffULL);
2125 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2127 ring->head = hw->hw_addr + E1000_TDH(reg_idx);
2128 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
2129 writel(0, ring->head);
2130 writel(0, ring->tail);
2132 txdctl |= IGB_TX_PTHRESH;
2133 txdctl |= IGB_TX_HTHRESH << 8;
2134 txdctl |= IGB_TX_WTHRESH << 16;
2136 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2137 wr32(E1000_TXDCTL(reg_idx), txdctl);
2141 * igb_configure_tx - Configure transmit Unit after Reset
2142 * @adapter: board private structure
2144 * Configure the Tx unit of the MAC after a reset.
2146 static void igb_configure_tx(struct igb_adapter *adapter)
2148 int i;
2150 for (i = 0; i < adapter->num_tx_queues; i++)
2151 igb_configure_tx_ring(adapter, &adapter->tx_ring[i]);
2153 /* Setup Transmit Descriptor Settings for eop descriptor */
2154 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS;
2158 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
2159 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2161 * Returns 0 on success, negative on failure
2163 int igb_setup_rx_resources(struct igb_ring *rx_ring)
2165 struct pci_dev *pdev = rx_ring->pdev;
2166 int size, desc_len;
2168 size = sizeof(struct igb_buffer) * rx_ring->count;
2169 rx_ring->buffer_info = vmalloc(size);
2170 if (!rx_ring->buffer_info)
2171 goto err;
2172 memset(rx_ring->buffer_info, 0, size);
2174 desc_len = sizeof(union e1000_adv_rx_desc);
2176 /* Round up to nearest 4K */
2177 rx_ring->size = rx_ring->count * desc_len;
2178 rx_ring->size = ALIGN(rx_ring->size, 4096);
2180 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
2181 &rx_ring->dma);
2183 if (!rx_ring->desc)
2184 goto err;
2186 rx_ring->next_to_clean = 0;
2187 rx_ring->next_to_use = 0;
2189 return 0;
2191 err:
2192 vfree(rx_ring->buffer_info);
2193 dev_err(&pdev->dev, "Unable to allocate memory for "
2194 "the receive descriptor ring\n");
2195 return -ENOMEM;
2199 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
2200 * (Descriptors) for all queues
2201 * @adapter: board private structure
2203 * Return 0 on success, negative on failure
2205 static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2207 int i, err = 0;
2209 for (i = 0; i < adapter->num_rx_queues; i++) {
2210 err = igb_setup_rx_resources(&adapter->rx_ring[i]);
2211 if (err) {
2212 dev_err(&adapter->pdev->dev,
2213 "Allocation for Rx Queue %u failed\n", i);
2214 for (i--; i >= 0; i--)
2215 igb_free_rx_resources(&adapter->rx_ring[i]);
2216 break;
2220 return err;
2224 * igb_setup_rctl - configure the receive control registers
2225 * @adapter: Board private structure
2227 static void igb_setup_rctl(struct igb_adapter *adapter)
2229 struct e1000_hw *hw = &adapter->hw;
2230 u32 rctl;
2232 rctl = rd32(E1000_RCTL);
2234 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2235 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
2237 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
2238 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2241 * enable stripping of CRC. It's unlikely this will break BMC
2242 * redirection as it did with e1000. Newer features require
2243 * that the HW strips the CRC.
2245 rctl |= E1000_RCTL_SECRC;
2248 * disable store bad packets and clear size bits.
2250 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
2252 /* enable LPE to prevent packets larger than max_frame_size */
2253 rctl |= E1000_RCTL_LPE;
2255 /* disable queue 0 to prevent tail write w/o re-config */
2256 wr32(E1000_RXDCTL(0), 0);
2258 /* Attention!!! For SR-IOV PF driver operations you must enable
2259 * queue drop for all VF and PF queues to prevent head of line blocking
2260 * if an un-trusted VF does not provide descriptors to hardware.
2262 if (adapter->vfs_allocated_count) {
2263 u32 vmolr;
2265 /* set all queue drop enable bits */
2266 wr32(E1000_QDE, ALL_QUEUES);
2268 vmolr = rd32(E1000_VMOLR(adapter->vfs_allocated_count));
2269 if (rctl & E1000_RCTL_LPE)
2270 vmolr |= E1000_VMOLR_LPE;
2271 if (adapter->num_rx_queues > 1)
2272 vmolr |= E1000_VMOLR_RSSE;
2273 wr32(E1000_VMOLR(adapter->vfs_allocated_count), vmolr);
2276 wr32(E1000_RCTL, rctl);
2280 * igb_rlpml_set - set maximum receive packet size
2281 * @adapter: board private structure
2283 * Configure maximum receivable packet size.
2285 static void igb_rlpml_set(struct igb_adapter *adapter)
2287 u32 max_frame_size = adapter->max_frame_size;
2288 struct e1000_hw *hw = &adapter->hw;
2289 u16 pf_id = adapter->vfs_allocated_count;
2291 if (adapter->vlgrp)
2292 max_frame_size += VLAN_TAG_SIZE;
2294 /* if vfs are enabled we set RLPML to the largest possible request
2295 * size and set the VMOLR RLPML to the size we need */
2296 if (pf_id) {
2297 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
2298 max_frame_size = MAX_STD_JUMBO_FRAME_SIZE + VLAN_TAG_SIZE;
2301 wr32(E1000_RLPML, max_frame_size);
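/* Illustrative numbers, assuming MAX_STD_JUMBO_FRAME_SIZE is 9234
 * (a 9216-byte MTU plus 18 bytes of Ethernet overhead, matching the
 * "MTU > 9216" check in igb_change_mtu) and VLAN_TAG_SIZE is 4: with
 * VFs enabled the shared RLPML register is opened up to 9238 bytes,
 * while each pool's real limit is enforced via its VMOLR RLPML field.
 */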
2305 * igb_configure_vt_default_pool - Configure VT default pool
2306 * @adapter: board private structure
2308 * Configure the default pool
2310 static void igb_configure_vt_default_pool(struct igb_adapter *adapter)
2312 struct e1000_hw *hw = &adapter->hw;
2313 u16 pf_id = adapter->vfs_allocated_count;
2314 u32 vtctl;
2316 /* not in sr-iov mode - do nothing */
2317 if (!pf_id)
2318 return;
2320 vtctl = rd32(E1000_VT_CTL);
2321 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2322 E1000_VT_CTL_DISABLE_DEF_POOL);
2323 vtctl |= pf_id << E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2324 wr32(E1000_VT_CTL, vtctl);
2328 * igb_configure_rx_ring - Configure a receive ring after Reset
2329 * @adapter: board private structure
2330 * @ring: receive ring to be configured
2332 * Configure the Rx unit of the MAC after a reset.
2334 static void igb_configure_rx_ring(struct igb_adapter *adapter,
2335 struct igb_ring *ring)
2337 struct e1000_hw *hw = &adapter->hw;
2338 u64 rdba = ring->dma;
2339 int reg_idx = ring->reg_idx;
2340 u32 srrctl, rxdctl;
2342 /* disable the queue */
2343 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2344 wr32(E1000_RXDCTL(reg_idx),
2345 rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
2347 /* Set DMA base address registers */
2348 wr32(E1000_RDBAL(reg_idx),
2349 rdba & 0x00000000ffffffffULL);
2350 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
2351 wr32(E1000_RDLEN(reg_idx),
2352 ring->count * sizeof(union e1000_adv_rx_desc));
2354 /* initialize head and tail */
2355 ring->head = hw->hw_addr + E1000_RDH(reg_idx);
2356 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
2357 writel(0, ring->head);
2358 writel(0, ring->tail);
2360 /* set descriptor configuration */
2361 if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
2362 srrctl = ALIGN(ring->rx_buffer_len, 64) <<
2363 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
2364 #if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
2365 srrctl |= IGB_RXBUFFER_16384 >>
2366 E1000_SRRCTL_BSIZEPKT_SHIFT;
2367 #else
2368 srrctl |= (PAGE_SIZE / 2) >>
2369 E1000_SRRCTL_BSIZEPKT_SHIFT;
2370 #endif
2371 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2372 } else {
2373 srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
2374 E1000_SRRCTL_BSIZEPKT_SHIFT;
2375 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2378 wr32(E1000_SRRCTL(reg_idx), srrctl);
2380 /* enable receive descriptor fetching */
2381 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2382 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2383 rxdctl &= 0xFFF00000;
2384 rxdctl |= IGB_RX_PTHRESH;
2385 rxdctl |= IGB_RX_HTHRESH << 8;
2386 rxdctl |= IGB_RX_WTHRESH << 16;
2387 wr32(E1000_RXDCTL(reg_idx), rxdctl);
2391 * igb_configure_rx - Configure receive Unit after Reset
2392 * @adapter: board private structure
2394 * Configure the Rx unit of the MAC after a reset.
2396 static void igb_configure_rx(struct igb_adapter *adapter)
2398 struct e1000_hw *hw = &adapter->hw;
2399 u32 rctl, rxcsum;
2400 int i;
2402 /* disable receives while setting up the descriptors */
2403 rctl = rd32(E1000_RCTL);
2404 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
2405 wrfl();
2406 mdelay(10);
2408 if (adapter->itr_setting > 3)
2409 wr32(E1000_ITR, adapter->itr);
2411 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2412 * the Base and Length of the Rx Descriptor Ring */
2413 for (i = 0; i < adapter->num_rx_queues; i++)
2414 igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
2416 if (adapter->num_rx_queues > 1) {
2417 u32 random[10];
2418 u32 mrqc;
2419 u32 j, shift;
2420 union e1000_reta {
2421 u32 dword;
2422 u8 bytes[4];
2423 } reta;
2425 get_random_bytes(&random[0], 40);
2427 if (hw->mac.type >= e1000_82576)
2428 shift = 0;
2429 else
2430 shift = 6;
2431 for (j = 0; j < (32 * 4); j++) {
2432 reta.bytes[j & 3] =
2433 adapter->rx_ring[(j % adapter->num_rx_queues)].reg_idx << shift;
2434 if ((j & 3) == 3)
2435 writel(reta.dword,
2436 hw->hw_addr + E1000_RETA(0) + (j & ~3));
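/* Worked example of the RETA fill above (illustrative, assuming four
 * RX queues with reg_idx 0..3): on 82576-class parts (shift == 0) the
 * 128 redirection-table bytes cycle 0,1,2,3,..., so every dword
 * written is 0x03020100; on 82575 (shift == 6) each queue index lands
 * in bits 7:6 of its byte instead.
 */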
2438 if (adapter->vfs_allocated_count)
2439 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2440 else
2441 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2443 /* Fill out hash function seeds */
2444 for (j = 0; j < 10; j++)
2445 array_wr32(E1000_RSSRK(0), j, random[j]);
2447 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
2448 E1000_MRQC_RSS_FIELD_IPV4_TCP);
2449 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
2450 E1000_MRQC_RSS_FIELD_IPV6_TCP);
2451 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
2452 E1000_MRQC_RSS_FIELD_IPV6_UDP);
2453 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
2454 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
2456 wr32(E1000_MRQC, mrqc);
2457 } else if (adapter->vfs_allocated_count) {
2458 /* Enable multi-queue for sr-iov */
2459 wr32(E1000_MRQC, E1000_MRQC_ENABLE_VMDQ);
2462 /* Enable Receive Checksum Offload for TCP and UDP */
2463 rxcsum = rd32(E1000_RXCSUM);
2464 /* Disable raw packet checksumming */
2465 rxcsum |= E1000_RXCSUM_PCSD;
2467 if (adapter->hw.mac.type == e1000_82576)
2468 /* Enable Receive Checksum Offload for SCTP */
2469 rxcsum |= E1000_RXCSUM_CRCOFL;
2471 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2472 wr32(E1000_RXCSUM, rxcsum);
2474 /* Set the default pool for the PF's first queue */
2475 igb_configure_vt_default_pool(adapter);
2477 /* set UTA to appropriate mode */
2478 igb_set_uta(adapter);
2480 /* set the correct pool for the PF default MAC address in entry 0 */
2481 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
2482 adapter->vfs_allocated_count);
2484 igb_rlpml_set(adapter);
2486 /* Enable Receives */
2487 wr32(E1000_RCTL, rctl);
2491 * igb_free_tx_resources - Free Tx Resources per Queue
2492 * @tx_ring: Tx descriptor ring for a specific queue
2494 * Free all transmit software resources
2496 void igb_free_tx_resources(struct igb_ring *tx_ring)
2498 igb_clean_tx_ring(tx_ring);
2500 vfree(tx_ring->buffer_info);
2501 tx_ring->buffer_info = NULL;
2503 pci_free_consistent(tx_ring->pdev, tx_ring->size,
2504 tx_ring->desc, tx_ring->dma);
2506 tx_ring->desc = NULL;
2510 * igb_free_all_tx_resources - Free Tx Resources for All Queues
2511 * @adapter: board private structure
2513 * Free all transmit software resources
2515 static void igb_free_all_tx_resources(struct igb_adapter *adapter)
2517 int i;
2519 for (i = 0; i < adapter->num_tx_queues; i++)
2520 igb_free_tx_resources(&adapter->tx_ring[i]);
2523 static void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
2524 struct igb_buffer *buffer_info)
2526 buffer_info->dma = 0;
2527 if (buffer_info->skb) {
2528 skb_dma_unmap(&tx_ring->pdev->dev,
2529 buffer_info->skb,
2530 DMA_TO_DEVICE);
2531 dev_kfree_skb_any(buffer_info->skb);
2532 buffer_info->skb = NULL;
2534 buffer_info->time_stamp = 0;
2535 /* buffer_info must be completely set up in the transmit path */
2539 * igb_clean_tx_ring - Free Tx Buffers
2540 * @tx_ring: ring to be cleaned
2542 static void igb_clean_tx_ring(struct igb_ring *tx_ring)
2544 struct igb_buffer *buffer_info;
2545 unsigned long size;
2546 unsigned int i;
2548 if (!tx_ring->buffer_info)
2549 return;
2550 /* Free all the Tx ring sk_buffs */
2552 for (i = 0; i < tx_ring->count; i++) {
2553 buffer_info = &tx_ring->buffer_info[i];
2554 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
2557 size = sizeof(struct igb_buffer) * tx_ring->count;
2558 memset(tx_ring->buffer_info, 0, size);
2560 /* Zero out the descriptor ring */
2562 memset(tx_ring->desc, 0, tx_ring->size);
2564 tx_ring->next_to_use = 0;
2565 tx_ring->next_to_clean = 0;
2567 writel(0, tx_ring->head);
2568 writel(0, tx_ring->tail);
2572 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
2573 * @adapter: board private structure
2575 static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
2577 int i;
2579 for (i = 0; i < adapter->num_tx_queues; i++)
2580 igb_clean_tx_ring(&adapter->tx_ring[i]);
2584 * igb_free_rx_resources - Free Rx Resources
2585 * @rx_ring: ring to clean the resources from
2587 * Free all receive software resources
2589 void igb_free_rx_resources(struct igb_ring *rx_ring)
2591 igb_clean_rx_ring(rx_ring);
2593 vfree(rx_ring->buffer_info);
2594 rx_ring->buffer_info = NULL;
2596 pci_free_consistent(rx_ring->pdev, rx_ring->size,
2597 rx_ring->desc, rx_ring->dma);
2599 rx_ring->desc = NULL;
2603 * igb_free_all_rx_resources - Free Rx Resources for All Queues
2604 * @adapter: board private structure
2606 * Free all receive software resources
2608 static void igb_free_all_rx_resources(struct igb_adapter *adapter)
2610 int i;
2612 for (i = 0; i < adapter->num_rx_queues; i++)
2613 igb_free_rx_resources(&adapter->rx_ring[i]);
2617 * igb_clean_rx_ring - Free Rx Buffers per Queue
2618 * @rx_ring: ring to free buffers from
2620 static void igb_clean_rx_ring(struct igb_ring *rx_ring)
2622 struct igb_buffer *buffer_info;
2623 unsigned long size;
2624 unsigned int i;
2626 if (!rx_ring->buffer_info)
2627 return;
2628 /* Free all the Rx ring sk_buffs */
2629 for (i = 0; i < rx_ring->count; i++) {
2630 buffer_info = &rx_ring->buffer_info[i];
2631 if (buffer_info->dma) {
2632 pci_unmap_single(rx_ring->pdev,
2633 buffer_info->dma,
2634 rx_ring->rx_buffer_len,
2635 PCI_DMA_FROMDEVICE);
2636 buffer_info->dma = 0;
2639 if (buffer_info->skb) {
2640 dev_kfree_skb(buffer_info->skb);
2641 buffer_info->skb = NULL;
2643 if (buffer_info->page_dma) {
2644 pci_unmap_page(rx_ring->pdev,
2645 buffer_info->page_dma,
2646 PAGE_SIZE / 2,
2647 PCI_DMA_FROMDEVICE);
2648 buffer_info->page_dma = 0;
2650 if (buffer_info->page) {
2651 put_page(buffer_info->page);
2652 buffer_info->page = NULL;
2653 buffer_info->page_offset = 0;
2657 size = sizeof(struct igb_buffer) * rx_ring->count;
2658 memset(rx_ring->buffer_info, 0, size);
2660 /* Zero out the descriptor ring */
2661 memset(rx_ring->desc, 0, rx_ring->size);
2663 rx_ring->next_to_clean = 0;
2664 rx_ring->next_to_use = 0;
2666 writel(0, rx_ring->head);
2667 writel(0, rx_ring->tail);
2671 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
2672 * @adapter: board private structure
2674 static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
2676 int i;
2678 for (i = 0; i < adapter->num_rx_queues; i++)
2679 igb_clean_rx_ring(&adapter->rx_ring[i]);
2683 * igb_set_mac - Change the Ethernet Address of the NIC
2684 * @netdev: network interface device structure
2685 * @p: pointer to an address structure
2687 * Returns 0 on success, negative on failure
2689 static int igb_set_mac(struct net_device *netdev, void *p)
2691 struct igb_adapter *adapter = netdev_priv(netdev);
2692 struct e1000_hw *hw = &adapter->hw;
2693 struct sockaddr *addr = p;
2695 if (!is_valid_ether_addr(addr->sa_data))
2696 return -EADDRNOTAVAIL;
2698 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2699 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
2701 /* set the correct pool for the new PF MAC address in entry 0 */
2702 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
2703 adapter->vfs_allocated_count);
2705 return 0;
2709 * igb_write_mc_addr_list - write multicast addresses to MTA
2710 * @netdev: network interface device structure
2712 * Writes multicast address list to the MTA hash table.
2713 * Returns: -ENOMEM on failure
2714 * 0 on no addresses written
2715 * X on writing X addresses to MTA
2717 static int igb_write_mc_addr_list(struct net_device *netdev)
2719 struct igb_adapter *adapter = netdev_priv(netdev);
2720 struct e1000_hw *hw = &adapter->hw;
2721 struct dev_mc_list *mc_ptr = netdev->mc_list;
2722 u8 *mta_list;
2723 u32 vmolr = 0;
2724 int i;
2726 if (!netdev->mc_count) {
2727 /* nothing to program, so clear mc list */
2728 igb_update_mc_addr_list(hw, NULL, 0);
2729 igb_restore_vf_multicasts(adapter);
2730 return 0;
2733 mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
2734 if (!mta_list)
2735 return -ENOMEM;
2737 /* set vmolr receive overflow multicast bit */
2738 vmolr |= E1000_VMOLR_ROMPE;
2740 /* The shared function expects a packed array of only addresses. */
2741 mc_ptr = netdev->mc_list;
2743 for (i = 0; i < netdev->mc_count; i++) {
2744 if (!mc_ptr)
2745 break;
2746 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
2747 mc_ptr = mc_ptr->next;
2749 igb_update_mc_addr_list(hw, mta_list, i);
2750 kfree(mta_list);
2752 return netdev->mc_count;
2756 * igb_write_uc_addr_list - write unicast addresses to RAR table
2757 * @netdev: network interface device structure
2759 * Writes unicast address list to the RAR table.
2760 * Returns: -ENOMEM on failure/insufficient address space
2761 * 0 on no addresses written
2762 * X on writing X addresses to the RAR table
2764 static int igb_write_uc_addr_list(struct net_device *netdev)
2766 struct igb_adapter *adapter = netdev_priv(netdev);
2767 struct e1000_hw *hw = &adapter->hw;
2768 unsigned int vfn = adapter->vfs_allocated_count;
2769 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
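/* Example budget (illustrative, assuming a 24-entry RAR table as on
 * 82576): with 7 VFs allocated, vfn = 7, so 8 entries are reserved
 * (one per VF plus the PF default MAC in entry 0) and 24 - 8 = 16
 * entries remain for additional unicast filter addresses.
 */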
2770 int count = 0;
2772 /* return ENOMEM indicating insufficient memory for addresses */
2773 if (netdev->uc.count > rar_entries)
2774 return -ENOMEM;
2776 if (netdev->uc.count && rar_entries) {
2777 struct netdev_hw_addr *ha;
2778 list_for_each_entry(ha, &netdev->uc.list, list) {
2779 if (!rar_entries)
2780 break;
2781 igb_rar_set_qsel(adapter, ha->addr,
2782 rar_entries--,
2783 vfn);
2784 count++;
2787 /* write the addresses in reverse order to avoid write combining */
2788 for (; rar_entries > 0 ; rar_entries--) {
2789 wr32(E1000_RAH(rar_entries), 0);
2790 wr32(E1000_RAL(rar_entries), 0);
2792 wrfl();
2794 return count;
2798 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2799 * @netdev: network interface device structure
2801 * The set_rx_mode entry point is called whenever the unicast or multicast
2802 * address lists or the network interface flags are updated. This routine is
2803 * responsible for configuring the hardware for proper unicast, multicast,
2804 * promiscuous mode, and all-multi behavior.
2806 static void igb_set_rx_mode(struct net_device *netdev)
2808 struct igb_adapter *adapter = netdev_priv(netdev);
2809 struct e1000_hw *hw = &adapter->hw;
2810 unsigned int vfn = adapter->vfs_allocated_count;
2811 u32 rctl, vmolr = 0;
2812 int count;
2814 /* Check for Promiscuous and All Multicast modes */
2815 rctl = rd32(E1000_RCTL);
2817 /* clear the affected bits */
2818 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
2820 if (netdev->flags & IFF_PROMISC) {
2821 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2822 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
2823 } else {
2824 if (netdev->flags & IFF_ALLMULTI) {
2825 rctl |= E1000_RCTL_MPE;
2826 vmolr |= E1000_VMOLR_MPME;
2827 } else {
2829 * Write addresses to the MTA; if the attempt fails
2830 * then we should just turn on promiscuous mode so
2831 * that we can at least receive multicast traffic
2833 count = igb_write_mc_addr_list(netdev);
2834 if (count < 0) {
2835 rctl |= E1000_RCTL_MPE;
2836 vmolr |= E1000_VMOLR_MPME;
2837 } else if (count) {
2838 vmolr |= E1000_VMOLR_ROMPE;
2842 * Write addresses to available RAR registers; if there is not
2843 * sufficient space to store all the addresses then enable
2844 * unicast promiscuous mode
2846 count = igb_write_uc_addr_list(netdev);
2847 if (count < 0) {
2848 rctl |= E1000_RCTL_UPE;
2849 vmolr |= E1000_VMOLR_ROPE;
2851 rctl |= E1000_RCTL_VFE;
2853 wr32(E1000_RCTL, rctl);
2856 * In order to support SR-IOV and eventually VMDq it is necessary to set
2857 * the VMOLR to enable the appropriate modes. Without this workaround
2858 * we will have issues with VLAN tag stripping not being done for frames
2859 * that are only arriving because we are the default pool
2861 if (hw->mac.type < e1000_82576)
2862 return;
2864 vmolr |= rd32(E1000_VMOLR(vfn)) &
2865 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
2866 wr32(E1000_VMOLR(vfn), vmolr);
2867 igb_restore_vf_multicasts(adapter);
2870 /* Need to wait a few seconds after link up to get diagnostic information from
2871 * the phy */
2872 static void igb_update_phy_info(unsigned long data)
2874 struct igb_adapter *adapter = (struct igb_adapter *) data;
2875 igb_get_phy_info(&adapter->hw);
2879 * igb_has_link - check shared code for link and determine up/down
2880 * @adapter: pointer to driver private info
2882 static bool igb_has_link(struct igb_adapter *adapter)
2884 struct e1000_hw *hw = &adapter->hw;
2885 bool link_active = false;
2886 s32 ret_val = 0;
2888 /* get_link_status is set on LSC (link status) interrupt or
2889 * rx sequence error interrupt. get_link_status will stay
2890 * set until e1000_check_for_link establishes link
2891 * for copper adapters ONLY
2893 switch (hw->phy.media_type) {
2894 case e1000_media_type_copper:
2895 if (hw->mac.get_link_status) {
2896 ret_val = hw->mac.ops.check_for_link(hw);
2897 link_active = !hw->mac.get_link_status;
2898 } else {
2899 link_active = true;
2901 break;
2902 case e1000_media_type_internal_serdes:
2903 ret_val = hw->mac.ops.check_for_link(hw);
2904 link_active = hw->mac.serdes_has_link;
2905 break;
2906 default:
2907 case e1000_media_type_unknown:
2908 break;
2911 return link_active;
2915 * igb_watchdog - Timer Call-back
2916 * @data: pointer to adapter cast into an unsigned long
2918 static void igb_watchdog(unsigned long data)
2920 struct igb_adapter *adapter = (struct igb_adapter *)data;
2921 /* Do the rest outside of interrupt context */
2922 schedule_work(&adapter->watchdog_task);
2925 static void igb_watchdog_task(struct work_struct *work)
2927 struct igb_adapter *adapter = container_of(work,
2928 struct igb_adapter, watchdog_task);
2929 struct e1000_hw *hw = &adapter->hw;
2930 struct net_device *netdev = adapter->netdev;
2931 struct igb_ring *tx_ring = adapter->tx_ring;
2932 u32 link;
2933 int i;
2935 link = igb_has_link(adapter);
2936 if ((netif_carrier_ok(netdev)) && link)
2937 goto link_up;
2939 if (link) {
2940 if (!netif_carrier_ok(netdev)) {
2941 u32 ctrl;
2942 hw->mac.ops.get_speed_and_duplex(&adapter->hw,
2943 &adapter->link_speed,
2944 &adapter->link_duplex);
2946 ctrl = rd32(E1000_CTRL);
2947 /* Link status message must follow this format */
2948 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
2949 "Flow Control: %s\n",
2950 netdev->name,
2951 adapter->link_speed,
2952 adapter->link_duplex == FULL_DUPLEX ?
2953 "Full Duplex" : "Half Duplex",
2954 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2955 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2956 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2957 E1000_CTRL_TFCE) ? "TX" : "None")));
2959 /* tweak tx_queue_len according to speed/duplex and
2960 * adjust the timeout factor */
2961 netdev->tx_queue_len = adapter->tx_queue_len;
2962 adapter->tx_timeout_factor = 1;
2963 switch (adapter->link_speed) {
2964 case SPEED_10:
2965 netdev->tx_queue_len = 10;
2966 adapter->tx_timeout_factor = 14;
2967 break;
2968 case SPEED_100:
2969 netdev->tx_queue_len = 100;
2970 /* maybe add some timeout factor ? */
2971 break;
2974 netif_carrier_on(netdev);
2976 igb_ping_all_vfs(adapter);
2978 /* link state has changed, schedule phy info update */
2979 if (!test_bit(__IGB_DOWN, &adapter->state))
2980 mod_timer(&adapter->phy_info_timer,
2981 round_jiffies(jiffies + 2 * HZ));
2983 } else {
2984 if (netif_carrier_ok(netdev)) {
2985 adapter->link_speed = 0;
2986 adapter->link_duplex = 0;
2987 /* Link status message must follow this format */
2988 printk(KERN_INFO "igb: %s NIC Link is Down\n",
2989 netdev->name);
2990 netif_carrier_off(netdev);
2992 igb_ping_all_vfs(adapter);
2994 /* link state has changed, schedule phy info update */
2995 if (!test_bit(__IGB_DOWN, &adapter->state))
2996 mod_timer(&adapter->phy_info_timer,
2997 round_jiffies(jiffies + 2 * HZ));
3001 link_up:
3002 igb_update_stats(adapter);
3004 hw->mac.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
3005 adapter->tpt_old = adapter->stats.tpt;
3006 hw->mac.collision_delta = adapter->stats.colc - adapter->colc_old;
3007 adapter->colc_old = adapter->stats.colc;
3009 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
3010 adapter->gorc_old = adapter->stats.gorc;
3011 adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
3012 adapter->gotc_old = adapter->stats.gotc;
3014 igb_update_adaptive(&adapter->hw);
3016 if (!netif_carrier_ok(netdev)) {
3017 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
3018 /* We've lost link, so the controller stops DMA,
3019 * but we've got queued Tx work that's never going
3020 * to get done, so reset controller to flush Tx.
3021 * (Do the reset outside of interrupt context). */
3022 adapter->tx_timeout_count++;
3023 schedule_work(&adapter->reset_task);
3024 /* return immediately since reset is imminent */
3025 return;
3029 /* Cause software interrupt to ensure rx ring is cleaned */
3030 if (adapter->msix_entries) {
3031 u32 eics = 0;
3032 for (i = 0; i < adapter->num_q_vectors; i++) {
3033 struct igb_q_vector *q_vector = adapter->q_vector[i];
3034 eics |= q_vector->eims_value;
3036 wr32(E1000_EICS, eics);
3037 } else {
3038 wr32(E1000_ICS, E1000_ICS_RXDMT0);
3041 /* Force detection of hung controller every watchdog period */
3042 tx_ring->detect_tx_hung = true;
3044 /* Reset the timer */
3045 if (!test_bit(__IGB_DOWN, &adapter->state))
3046 mod_timer(&adapter->watchdog_timer,
3047 round_jiffies(jiffies + 2 * HZ));
3050 enum latency_range {
3051 lowest_latency = 0,
3052 low_latency = 1,
3053 bulk_latency = 2,
3054 latency_invalid = 255
3059 * igb_update_ring_itr - update the dynamic ITR value based on packet size
3061 * Stores a new ITR value based strictly on packet size. This
3062 * algorithm is less sophisticated than that used in igb_update_itr,
3063 * due to the difficulty of synchronizing statistics across multiple
3064 * receive rings. The divisors and thresholds used by this function
3065 * were determined based on theoretical maximum wire speed and testing
3066 * data, in order to minimize response time while increasing bulk
3067 * throughput.
3068 * This functionality is controlled by the InterruptThrottleRate module
3069 * parameter (see igb_param.c)
3070 * NOTE: This function is called only when operating in a multiqueue
3071 * receive environment.
3072 * @q_vector: pointer to q_vector
3074 static void igb_update_ring_itr(struct igb_q_vector *q_vector)
3076 int new_val = q_vector->itr_val;
3077 int avg_wire_size = 0;
3078 struct igb_adapter *adapter = q_vector->adapter;
3080 /* For non-gigabit speeds, just fix the interrupt rate at 4000
3081 * ints/sec - ITR timer value of 120 ticks.
3083 if (adapter->link_speed != SPEED_1000) {
3084 new_val = 976;
3085 goto set_itr_val;
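/* Sanity check on the constant above (an inference, not a datasheet
 * citation): itr_val appears to be in ~256 ns units, so 976 * 256 ns
 * is roughly 250 us between interrupts, i.e. about the 4000 ints/sec
 * promised by the comment.
 */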
3088 if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
3089 struct igb_ring *ring = q_vector->rx_ring;
3090 avg_wire_size = ring->total_bytes / ring->total_packets;
3093 if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
3094 struct igb_ring *ring = q_vector->tx_ring;
3095 avg_wire_size = max_t(u32, avg_wire_size,
3096 (ring->total_bytes /
3097 ring->total_packets));
3100 /* if avg_wire_size isn't set no work was done */
3101 if (!avg_wire_size)
3102 goto clear_counts;
3104 /* Add 24 bytes to size to account for CRC, preamble, and gap */
3105 avg_wire_size += 24;
3107 /* Don't starve jumbo frames */
3108 avg_wire_size = min(avg_wire_size, 3000);
3110 /* Give a little boost to mid-size frames */
3111 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
3112 new_val = avg_wire_size / 3;
3113 else
3114 new_val = avg_wire_size / 2;
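/* Worked example (illustrative): a stream of 1500-byte frames gives
 * avg_wire_size = 1500 + 24 = 1524, which is outside the 300..1200
 * mid-size window, so new_val = 1524 / 2 = 762 (~5000 ints/sec at the
 * ~256 ns unit assumed elsewhere); 64-byte frames give (64 + 24) / 2 =
 * 44, pushing the rate up toward ~90000 ints/sec.
 */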
3116 set_itr_val:
3117 if (new_val != q_vector->itr_val) {
3118 q_vector->itr_val = new_val;
3119 q_vector->set_itr = 1;
3121 clear_counts:
3122 if (q_vector->rx_ring) {
3123 q_vector->rx_ring->total_bytes = 0;
3124 q_vector->rx_ring->total_packets = 0;
3126 if (q_vector->tx_ring) {
3127 q_vector->tx_ring->total_bytes = 0;
3128 q_vector->tx_ring->total_packets = 0;
3133 * igb_update_itr - update the dynamic ITR value based on statistics
3134 * Stores a new ITR value based on packets and byte
3135 * counts during the last interrupt. The advantage of per interrupt
3136 * computation is faster updates and more accurate ITR for the current
3137 * traffic pattern. Constants in this function were computed
3138 * based on theoretical maximum wire speed and thresholds were set based
3139 * on testing data as well as attempting to minimize response time
3140 * while increasing bulk throughput.
3141 * this functionality is controlled by the InterruptThrottleRate module
3142 * parameter (see igb_param.c)
3143 * NOTE: These calculations are only valid when operating in a single-
3144 * queue environment.
3145 * @adapter: pointer to adapter
3146 * @itr_setting: current q_vector->itr_val
3147 * @packets: the number of packets during this measurement interval
3148 * @bytes: the number of bytes during this measurement interval
3150 static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
3151 int packets, int bytes)
3153 unsigned int retval = itr_setting;
3155 if (packets == 0)
3156 goto update_itr_done;
3158 switch (itr_setting) {
3159 case lowest_latency:
3160 /* handle TSO and jumbo frames */
3161 if (bytes/packets > 8000)
3162 retval = bulk_latency;
3163 else if ((packets < 5) && (bytes > 512))
3164 retval = low_latency;
3165 break;
3166 case low_latency: /* 50 usec aka 20000 ints/s */
3167 if (bytes > 10000) {
3168 /* this if handles the TSO accounting */
3169 if (bytes/packets > 8000) {
3170 retval = bulk_latency;
3171 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
3172 retval = bulk_latency;
3173 } else if ((packets > 35)) {
3174 retval = lowest_latency;
3176 } else if (bytes/packets > 2000) {
3177 retval = bulk_latency;
3178 } else if (packets <= 2 && bytes < 512) {
3179 retval = lowest_latency;
3181 break;
3182 case bulk_latency: /* 250 usec aka 4000 ints/s */
3183 if (bytes > 25000) {
3184 if (packets > 35)
3185 retval = low_latency;
3186 } else if (bytes < 1500) {
3187 retval = low_latency;
3189 break;
3192 update_itr_done:
3193 return retval;
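/* Worked examples of the transitions above (illustrative): from
 * low_latency, an interval with packets = 20 and bytes = 30000 has
 * bytes/packets = 1500 > 1200 and so demotes to bulk_latency; from
 * bulk_latency, packets = 40 and bytes = 30000 (> 25000, > 35 packets)
 * promotes back to low_latency.
 */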
3196 static void igb_set_itr(struct igb_adapter *adapter)
3198 struct igb_q_vector *q_vector = adapter->q_vector[0];
3199 u16 current_itr;
3200 u32 new_itr = q_vector->itr_val;
3202 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3203 if (adapter->link_speed != SPEED_1000) {
3204 current_itr = 0;
3205 new_itr = 4000;
3206 goto set_itr_now;
3209 adapter->rx_itr = igb_update_itr(adapter,
3210 adapter->rx_itr,
3211 adapter->rx_ring->total_packets,
3212 adapter->rx_ring->total_bytes);
3214 adapter->tx_itr = igb_update_itr(adapter,
3215 adapter->tx_itr,
3216 adapter->tx_ring->total_packets,
3217 adapter->tx_ring->total_bytes);
3218 current_itr = max(adapter->rx_itr, adapter->tx_itr);
3220 /* conservative mode (itr 3) eliminates the lowest_latency setting */
3221 if (adapter->itr_setting == 3 && current_itr == lowest_latency)
3222 current_itr = low_latency;
3224 switch (current_itr) {
3225 /* counts and packets in update_itr are dependent on these numbers */
3226 case lowest_latency:
3227 new_itr = 56; /* aka 70,000 ints/sec */
3228 break;
3229 case low_latency:
3230 new_itr = 196; /* aka 20,000 ints/sec */
3231 break;
3232 case bulk_latency:
3233 new_itr = 980; /* aka 4,000 ints/sec */
3234 break;
3235 default:
3236 break;
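/* The "aka" figures above are consistent with an itr_val granularity
 * of ~256 ns: 56 * 256 ns ~= 14.3 us (~70000 ints/sec), 196 * 256 ns
 * ~= 50 us (~20000 ints/sec), 980 * 256 ns ~= 251 us (~4000 ints/sec).
 * This is inferred from the constants, not taken from a datasheet.
 */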
3239 set_itr_now:
3240 adapter->rx_ring->total_bytes = 0;
3241 adapter->rx_ring->total_packets = 0;
3242 adapter->tx_ring->total_bytes = 0;
3243 adapter->tx_ring->total_packets = 0;
3245 if (new_itr != q_vector->itr_val) {
3246 /* this attempts to bias the interrupt rate towards Bulk
3247 * by adding intermediate steps when interrupt rate is
3248 * increasing */
3249 new_itr = new_itr > q_vector->itr_val ?
3250 max((new_itr * q_vector->itr_val) /
3251 (new_itr + (q_vector->itr_val >> 2)),
3252 new_itr) :
3253 new_itr;
3254 /* Don't write the value here; it resets the adapter's
3255 * internal timer, and causes us to delay far longer than
3256 * we should between interrupts. Instead, we write the ITR
3257 * value at the beginning of the next interrupt so the timing
3258 * ends up being correct.
3260 q_vector->itr_val = new_itr;
3261 q_vector->set_itr = 1;
3264 return;
3267 #define IGB_TX_FLAGS_CSUM 0x00000001
3268 #define IGB_TX_FLAGS_VLAN 0x00000002
3269 #define IGB_TX_FLAGS_TSO 0x00000004
3270 #define IGB_TX_FLAGS_IPV4 0x00000008
3271 #define IGB_TX_FLAGS_TSTAMP 0x00000010
3272 #define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
3273 #define IGB_TX_FLAGS_VLAN_SHIFT 16
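/* Illustrative use of the flag layout above: a VLAN tag of 100 (0x64)
 * occupies the upper 16 bits, e.g.
 *
 *	tx_flags |= IGB_TX_FLAGS_VLAN;
 *	tx_flags |= (100 << IGB_TX_FLAGS_VLAN_SHIFT);
 *
 * yields tx_flags == 0x00640002, and the tag is later recovered with
 * (tx_flags & IGB_TX_FLAGS_VLAN_MASK) when building the context
 * descriptor.
 */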
3275 static inline int igb_tso_adv(struct igb_adapter *adapter,
3276 struct igb_ring *tx_ring,
3277 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
3279 struct e1000_adv_tx_context_desc *context_desc;
3280 unsigned int i;
3281 int err;
3282 struct igb_buffer *buffer_info;
3283 u32 info = 0, tu_cmd = 0;
3284 u32 mss_l4len_idx, l4len;
3285 *hdr_len = 0;
3287 if (skb_header_cloned(skb)) {
3288 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3289 if (err)
3290 return err;
3293 l4len = tcp_hdrlen(skb);
3294 *hdr_len += l4len;
3296 if (skb->protocol == htons(ETH_P_IP)) {
3297 struct iphdr *iph = ip_hdr(skb);
3298 iph->tot_len = 0;
3299 iph->check = 0;
3300 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
3301 iph->daddr, 0,
3302 IPPROTO_TCP,
3304 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
3305 ipv6_hdr(skb)->payload_len = 0;
3306 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3307 &ipv6_hdr(skb)->daddr,
3308 0, IPPROTO_TCP, 0);
3311 i = tx_ring->next_to_use;
3313 buffer_info = &tx_ring->buffer_info[i];
3314 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
3315 /* VLAN MACLEN IPLEN */
3316 if (tx_flags & IGB_TX_FLAGS_VLAN)
3317 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
3318 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3319 *hdr_len += skb_network_offset(skb);
3320 info |= skb_network_header_len(skb);
3321 *hdr_len += skb_network_header_len(skb);
3322 context_desc->vlan_macip_lens = cpu_to_le32(info);
3324 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3325 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
3327 if (skb->protocol == htons(ETH_P_IP))
3328 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
3329 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3331 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3333 /* MSS L4LEN IDX */
3334 mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
3335 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
3337 /* For 82575, context index must be unique per ring. */
3338 if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
3339 mss_l4len_idx |= tx_ring->queue_index << 4;
3341 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3342 context_desc->seqnum_seed = 0;
3344 buffer_info->time_stamp = jiffies;
3345 buffer_info->next_to_watch = i;
3346 buffer_info->dma = 0;
3347 i++;
3348 if (i == tx_ring->count)
3349 i = 0;
3351 tx_ring->next_to_use = i;
3353 return true;
3356 static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
3357 struct igb_ring *tx_ring,
3358 struct sk_buff *skb, u32 tx_flags)
3360 struct e1000_adv_tx_context_desc *context_desc;
3361 struct pci_dev *pdev = tx_ring->pdev;
3362 struct igb_buffer *buffer_info;
3363 u32 info = 0, tu_cmd = 0;
3364 unsigned int i;
3366 if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
3367 (tx_flags & IGB_TX_FLAGS_VLAN)) {
3368 i = tx_ring->next_to_use;
3369 buffer_info = &tx_ring->buffer_info[i];
3370 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
3372 if (tx_flags & IGB_TX_FLAGS_VLAN)
3373 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
3374 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3375 if (skb->ip_summed == CHECKSUM_PARTIAL)
3376 info |= skb_network_header_len(skb);
3378 context_desc->vlan_macip_lens = cpu_to_le32(info);
3380 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
3382 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3383 __be16 protocol;
3385 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3386 const struct vlan_ethhdr *vhdr =
3387 (const struct vlan_ethhdr*)skb->data;
3389 protocol = vhdr->h_vlan_encapsulated_proto;
3390 } else {
3391 protocol = skb->protocol;
3394 switch (protocol) {
3395 case cpu_to_be16(ETH_P_IP):
3396 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
3397 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3398 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3399 else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
3400 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
3401 break;
3402 case cpu_to_be16(ETH_P_IPV6):
3403 /* XXX what about other V6 headers?? */
3404 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3405 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3406 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
3407 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
3408 break;
3409 default:
3410 if (unlikely(net_ratelimit()))
3411 dev_warn(&pdev->dev,
3412 "partial checksum but proto=%x!\n",
3413 skb->protocol);
3414 break;
3418 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3419 context_desc->seqnum_seed = 0;
3420 if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
3421 context_desc->mss_l4len_idx =
3422 cpu_to_le32(tx_ring->queue_index << 4);
3423 else
3424 context_desc->mss_l4len_idx = 0;
3426 buffer_info->time_stamp = jiffies;
3427 buffer_info->next_to_watch = i;
3428 buffer_info->dma = 0;
3430 i++;
3431 if (i == tx_ring->count)
3432 i = 0;
3433 tx_ring->next_to_use = i;
3435 return true;
3437 return false;
3440 #define IGB_MAX_TXD_PWR 16
3441 #define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
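/* IGB_MAX_DATA_PER_TXD is therefore 65536 bytes: one descriptor can
 * carry any buffer the stack hands us here (a page fragment or even a
 * 9018-byte linear jumbo frame), which is why the mapping code below
 * only BUG_ONs on len >= 64K instead of splitting buffers.
 */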
3443 static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
3444 unsigned int first)
3446 struct igb_buffer *buffer_info;
3447 struct pci_dev *pdev = tx_ring->pdev;
3448 unsigned int len = skb_headlen(skb);
3449 unsigned int count = 0, i;
3450 unsigned int f;
3451 dma_addr_t *map;
3453 i = tx_ring->next_to_use;
3455 if (skb_dma_map(&pdev->dev, skb, DMA_TO_DEVICE)) {
3456 dev_err(&pdev->dev, "TX DMA map failed\n");
3457 return 0;
3460 map = skb_shinfo(skb)->dma_maps;
3462 buffer_info = &tx_ring->buffer_info[i];
3463 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
3464 buffer_info->length = len;
3465 /* set time_stamp *before* dma to help avoid a possible race */
3466 buffer_info->time_stamp = jiffies;
3467 buffer_info->next_to_watch = i;
3468 buffer_info->dma = skb_shinfo(skb)->dma_head;
3470 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
3471 struct skb_frag_struct *frag;
3473 i++;
3474 if (i == tx_ring->count)
3475 i = 0;
3477 frag = &skb_shinfo(skb)->frags[f];
3478 len = frag->size;
3480 buffer_info = &tx_ring->buffer_info[i];
3481 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
3482 buffer_info->length = len;
3483 buffer_info->time_stamp = jiffies;
3484 buffer_info->next_to_watch = i;
3485 buffer_info->dma = map[count];
3486 count++;
3489 tx_ring->buffer_info[i].skb = skb;
3490 tx_ring->buffer_info[first].next_to_watch = i;
3492 return count + 1;
3495 static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
3496 struct igb_ring *tx_ring,
3497 int tx_flags, int count, u32 paylen,
3498 u8 hdr_len)
3500 union e1000_adv_tx_desc *tx_desc = NULL;
3501 struct igb_buffer *buffer_info;
3502 u32 olinfo_status = 0, cmd_type_len;
3503 unsigned int i;
3505 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
3506 E1000_ADVTXD_DCMD_DEXT);
3508 if (tx_flags & IGB_TX_FLAGS_VLAN)
3509 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
3511 if (tx_flags & IGB_TX_FLAGS_TSTAMP)
3512 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
3514 if (tx_flags & IGB_TX_FLAGS_TSO) {
3515 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
3517 /* insert tcp checksum */
3518 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3520 /* insert ip checksum */
3521 if (tx_flags & IGB_TX_FLAGS_IPV4)
3522 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
3524 } else if (tx_flags & IGB_TX_FLAGS_CSUM) {
3525 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3528 if ((adapter->flags & IGB_FLAG_NEED_CTX_IDX) &&
3529 (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO |
3530 IGB_TX_FLAGS_VLAN)))
3531 olinfo_status |= tx_ring->queue_index << 4;
3533 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
3535 i = tx_ring->next_to_use;
3536 while (count--) {
3537 buffer_info = &tx_ring->buffer_info[i];
3538 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
3539 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
3540 tx_desc->read.cmd_type_len =
3541 cpu_to_le32(cmd_type_len | buffer_info->length);
3542 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3543 i++;
3544 if (i == tx_ring->count)
3545 i = 0;
3548 tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
3549 /* Force memory writes to complete before letting h/w
3550 * know there are new descriptors to fetch. (Only
3551 * applicable for weak-ordered memory model archs,
3552 * such as IA-64). */
3553 wmb();
3555 tx_ring->next_to_use = i;
3556 writel(i, tx_ring->tail);
3557 /* we need this if more than one processor can write to our tail
3558 * at a time; it synchronizes IO on IA64/Altix systems */
3559 mmiowb();
3562 static int __igb_maybe_stop_tx(struct net_device *netdev,
3563 struct igb_ring *tx_ring, int size)
3565 netif_stop_subqueue(netdev, tx_ring->queue_index);
3567 /* Herbert's original patch had:
3568 * smp_mb__after_netif_stop_queue();
3569 * but since that doesn't exist yet, just open code it. */
3570 smp_mb();
3572 /* We need to check again in case another CPU has just
3573 * made room available. */
3574 if (igb_desc_unused(tx_ring) < size)
3575 return -EBUSY;
3577 /* A reprieve! */
3578 netif_wake_subqueue(netdev, tx_ring->queue_index);
3579 tx_ring->tx_stats.restart_queue++;
3580 return 0;
3583 static int igb_maybe_stop_tx(struct net_device *netdev,
3584 struct igb_ring *tx_ring, int size)
3586 if (igb_desc_unused(tx_ring) >= size)
3587 return 0;
3588 return __igb_maybe_stop_tx(netdev, tx_ring, size);
3591 static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
3592 struct net_device *netdev,
3593 struct igb_ring *tx_ring)
3595 struct igb_adapter *adapter = netdev_priv(netdev);
3596 unsigned int first;
3597 unsigned int tx_flags = 0;
3598 u8 hdr_len = 0;
3599 int count = 0;
3600 int tso = 0;
3601 union skb_shared_tx *shtx;
3603 if (test_bit(__IGB_DOWN, &adapter->state)) {
3604 dev_kfree_skb_any(skb);
3605 return NETDEV_TX_OK;
3608 if (skb->len <= 0) {
3609 dev_kfree_skb_any(skb);
3610 return NETDEV_TX_OK;
3613 /* need: 1 descriptor per page,
3614 * + 2 desc gap to keep tail from touching head,
3615 * + 1 desc for skb->data,
3616 * + 1 desc for context descriptor,
3617 * otherwise try next time */
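/* (Budget example: a TSO skb with 3 page fragments asks for 3 + 4 = 7
 * free descriptors -- 1 for skb->data, 3 for the fragments, 1 for the
 * context descriptor, plus the 2-descriptor gap.) */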
3618 if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
3619 /* this is a hard error */
3620 return NETDEV_TX_BUSY;
3624 * TODO: check that there currently is no other packet with
3625 * time stamping in the queue
3627 * When doing time stamping, keep the connection to the socket
3628 * a while longer: it is still needed by skb_hwtstamp_tx(),
3629 * called either in igb_tx_hwtstamp() or by our caller when
3630 * doing software time stamping.
3632 shtx = skb_tx(skb);
3633 if (unlikely(shtx->hardware)) {
3634 shtx->in_progress = 1;
3635 tx_flags |= IGB_TX_FLAGS_TSTAMP;
3638 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
3639 tx_flags |= IGB_TX_FLAGS_VLAN;
3640 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
3643 if (skb->protocol == htons(ETH_P_IP))
3644 tx_flags |= IGB_TX_FLAGS_IPV4;
3646 first = tx_ring->next_to_use;
3647 tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags,
3648 &hdr_len) : 0;
3650 if (tso < 0) {
3651 dev_kfree_skb_any(skb);
3652 return NETDEV_TX_OK;
3655 if (tso)
3656 tx_flags |= IGB_TX_FLAGS_TSO;
3657 else if (igb_tx_csum_adv(adapter, tx_ring, skb, tx_flags) &&
3658 (skb->ip_summed == CHECKSUM_PARTIAL))
3659 tx_flags |= IGB_TX_FLAGS_CSUM;
3662 * count reflects descriptors mapped; if 0 then a mapping error
3663 * has occurred and we need to rewind the descriptor queue
3665 count = igb_tx_map_adv(tx_ring, skb, first);
3667 if (count) {
3668 igb_tx_queue_adv(adapter, tx_ring, tx_flags, count,
3669 skb->len, hdr_len);
3670 /* Make sure there is space in the ring for the next send. */
3671 igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
3672 } else {
3673 dev_kfree_skb_any(skb);
3674 tx_ring->buffer_info[first].time_stamp = 0;
3675 tx_ring->next_to_use = first;
3678 return NETDEV_TX_OK;
3681 static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
3682 struct net_device *netdev)
3684 struct igb_adapter *adapter = netdev_priv(netdev);
3685 struct igb_ring *tx_ring;
3687 int r_idx = 0;
3688 r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
3689 tx_ring = adapter->multi_tx_table[r_idx];
3691 /* This goes back to the question of how to logically map a tx queue
3692 * to a flow. Right now, performance is impacted slightly negatively
3693 * if using multiple tx queues. If the stack breaks away from a
3694 * single qdisc implementation, we can look at this again. */
3695 return igb_xmit_frame_ring_adv(skb, netdev, tx_ring);
3699 * igb_tx_timeout - Respond to a Tx Hang
3700 * @netdev: network interface device structure
3702 static void igb_tx_timeout(struct net_device *netdev)
3704 struct igb_adapter *adapter = netdev_priv(netdev);
3705 struct e1000_hw *hw = &adapter->hw;
3707 /* Do the reset outside of interrupt context */
3708 adapter->tx_timeout_count++;
3709 schedule_work(&adapter->reset_task);
3710 wr32(E1000_EICS,
3711 (adapter->eims_enable_mask & ~adapter->eims_other));
3714 static void igb_reset_task(struct work_struct *work)
3716 struct igb_adapter *adapter;
3717 adapter = container_of(work, struct igb_adapter, reset_task);
3719 igb_reinit_locked(adapter);
3723 * igb_get_stats - Get System Network Statistics
3724 * @netdev: network interface device structure
3726 * Returns the address of the device statistics structure.
3727 * The statistics are actually updated from the timer callback.
3729 static struct net_device_stats *igb_get_stats(struct net_device *netdev)
3731 /* only return the current stats */
3732 return &netdev->stats;
3736 * igb_change_mtu - Change the Maximum Transfer Unit
3737 * @netdev: network interface device structure
3738 * @new_mtu: new value for maximum frame size
3740 * Returns 0 on success, negative on failure
3742 static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3744 struct igb_adapter *adapter = netdev_priv(netdev);
3745 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3746 u32 rx_buffer_len, i;
3748 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
3749 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3750 dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
3751 return -EINVAL;
3754 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3755 dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
3756 return -EINVAL;
3759 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
3760 msleep(1);
3762 /* igb_down has a dependency on max_frame_size */
3763 adapter->max_frame_size = max_frame;
3764 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3765 * means we reserve 2 more; this pushes us to allocate from the next
3766 * larger slab size.
3767 * i.e. RXBUFFER_2048 --> size-4096 slab
3770 if (max_frame <= IGB_RXBUFFER_1024)
3771 rx_buffer_len = IGB_RXBUFFER_1024;
3772 else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
3773 rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3774 else
3775 rx_buffer_len = IGB_RXBUFFER_128;
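/* Examples of the selection above (illustrative): a standard 1500-byte
 * MTU gives max_frame = 1518 and lands in the 1522-byte
 * MAXIMUM_ETHERNET_VLAN_SIZE bucket; a 9000-byte jumbo MTU gives
 * max_frame = 9018 and falls through to IGB_RXBUFFER_128, where
 * igb_configure_rx_ring switches the queue to header-split mode with
 * half-page packet buffers.
 */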
3777 if (netif_running(netdev))
3778 igb_down(adapter);
3780 dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
3781 netdev->mtu, new_mtu);
3782 netdev->mtu = new_mtu;
3784 for (i = 0; i < adapter->num_rx_queues; i++)
3785 adapter->rx_ring[i].rx_buffer_len = rx_buffer_len;
3787 if (netif_running(netdev))
3788 igb_up(adapter);
3789 else
3790 igb_reset(adapter);
3792 clear_bit(__IGB_RESETTING, &adapter->state);
3794 return 0;
3798 * igb_update_stats - Update the board statistics counters
3799 * @adapter: board private structure
3802 void igb_update_stats(struct igb_adapter *adapter)
3804 struct net_device *netdev = adapter->netdev;
3805 struct e1000_hw *hw = &adapter->hw;
3806 struct pci_dev *pdev = adapter->pdev;
3807 u16 phy_tmp;
3809 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3812 * Prevent stats update while adapter is being reset, or if the pci
3813 * connection is down.
3815 if (adapter->link_speed == 0)
3816 return;
3817 if (pci_channel_offline(pdev))
3818 return;
3820 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
3821 adapter->stats.gprc += rd32(E1000_GPRC);
3822 adapter->stats.gorc += rd32(E1000_GORCL);
3823 rd32(E1000_GORCH); /* clear GORCL */
3824 adapter->stats.bprc += rd32(E1000_BPRC);
3825 adapter->stats.mprc += rd32(E1000_MPRC);
3826 adapter->stats.roc += rd32(E1000_ROC);
3828 adapter->stats.prc64 += rd32(E1000_PRC64);
3829 adapter->stats.prc127 += rd32(E1000_PRC127);
3830 adapter->stats.prc255 += rd32(E1000_PRC255);
3831 adapter->stats.prc511 += rd32(E1000_PRC511);
3832 adapter->stats.prc1023 += rd32(E1000_PRC1023);
3833 adapter->stats.prc1522 += rd32(E1000_PRC1522);
3834 adapter->stats.symerrs += rd32(E1000_SYMERRS);
3835 adapter->stats.sec += rd32(E1000_SEC);
3837 adapter->stats.mpc += rd32(E1000_MPC);
3838 adapter->stats.scc += rd32(E1000_SCC);
3839 adapter->stats.ecol += rd32(E1000_ECOL);
3840 adapter->stats.mcc += rd32(E1000_MCC);
3841 adapter->stats.latecol += rd32(E1000_LATECOL);
3842 adapter->stats.dc += rd32(E1000_DC);
3843 adapter->stats.rlec += rd32(E1000_RLEC);
3844 adapter->stats.xonrxc += rd32(E1000_XONRXC);
3845 adapter->stats.xontxc += rd32(E1000_XONTXC);
3846 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
3847 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
3848 adapter->stats.fcruc += rd32(E1000_FCRUC);
3849 adapter->stats.gptc += rd32(E1000_GPTC);
3850 adapter->stats.gotc += rd32(E1000_GOTCL);
3851 rd32(E1000_GOTCH); /* clear GOTCL */
3852 adapter->stats.rnbc += rd32(E1000_RNBC);
3853 adapter->stats.ruc += rd32(E1000_RUC);
3854 adapter->stats.rfc += rd32(E1000_RFC);
3855 adapter->stats.rjc += rd32(E1000_RJC);
3856 adapter->stats.tor += rd32(E1000_TORH);
3857 adapter->stats.tot += rd32(E1000_TOTH);
3858 adapter->stats.tpr += rd32(E1000_TPR);
3860 adapter->stats.ptc64 += rd32(E1000_PTC64);
3861 adapter->stats.ptc127 += rd32(E1000_PTC127);
3862 adapter->stats.ptc255 += rd32(E1000_PTC255);
3863 adapter->stats.ptc511 += rd32(E1000_PTC511);
3864 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
3865 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
3867 adapter->stats.mptc += rd32(E1000_MPTC);
3868 adapter->stats.bptc += rd32(E1000_BPTC);
3870 /* used for adaptive IFS */
3872 hw->mac.tx_packet_delta = rd32(E1000_TPT);
3873 adapter->stats.tpt += hw->mac.tx_packet_delta;
3874 hw->mac.collision_delta = rd32(E1000_COLC);
3875 adapter->stats.colc += hw->mac.collision_delta;
3877 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
3878 adapter->stats.rxerrc += rd32(E1000_RXERRC);
3879 adapter->stats.tncrs += rd32(E1000_TNCRS);
3880 adapter->stats.tsctc += rd32(E1000_TSCTC);
3881 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
3883 adapter->stats.iac += rd32(E1000_IAC);
3884 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
3885 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
3886 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
3887 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
3888 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
3889 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
3890 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
3891 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
3893 /* Fill out the OS statistics structure */
3894 netdev->stats.multicast = adapter->stats.mprc;
3895 netdev->stats.collisions = adapter->stats.colc;
3897 /* Rx Errors */
3899 if (hw->mac.type != e1000_82575) {
3900 u32 rqdpc_tmp;
3901 u64 rqdpc_total = 0;
3902 int i;
3903 /* Read out drop stats per RX queue. Note that RQDPC (Receive
3904 * Queue Drop Packet Count) is only incremented if the DROP_EN
3905 * bit is set (in the SRRCTL register for that queue). If the
3906 * DROP_EN bit is NOT set, then a somewhat equivalent count is
3907 * stored in RNBC (not on a per-queue basis).
3908 * Also note the drop count is due to lack of available
3909 * descriptors.
3911 for (i = 0; i < adapter->num_rx_queues; i++) {
3912 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0xFFF;
3913 adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
3914 rqdpc_total += adapter->rx_ring[i].rx_stats.drops;
3916 netdev->stats.rx_fifo_errors = rqdpc_total;
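/* For drops to be counted in RQDPC at all, the Drop Enable bit must be
 * set in each queue's SRRCTL register; a minimal sketch, assuming a
 * E1000_SRRCTL_DROP_EN bit definition for that bit:
 *
 *	u32 srrctl = rd32(E1000_SRRCTL(i));
 *
 *	srrctl |= E1000_SRRCTL_DROP_EN;
 *	wr32(E1000_SRRCTL(i), srrctl);
 */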
3919 /* Note RNBC (Receive No Buffers Count) is not an exact
3920 * drop count, as the hardware FIFO might save the day. That's
3921 * one reason for folding it into rx_fifo_errors, as it's
3922 * potentially not a true drop.
3924 netdev->stats.rx_fifo_errors += adapter->stats.rnbc;
3926 /* RLEC on some newer hardware can be incorrect, so build
3927 * our own version based on RUC and ROC */
3928 netdev->stats.rx_errors = adapter->stats.rxerrc +
3929 adapter->stats.crcerrs + adapter->stats.algnerrc +
3930 adapter->stats.ruc + adapter->stats.roc +
3931 adapter->stats.cexterr;
3932 netdev->stats.rx_length_errors = adapter->stats.ruc +
3933 adapter->stats.roc;
3934 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3935 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3936 netdev->stats.rx_missed_errors = adapter->stats.mpc;
3938 /* Tx Errors */
3939 netdev->stats.tx_errors = adapter->stats.ecol +
3940 adapter->stats.latecol;
3941 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3942 netdev->stats.tx_window_errors = adapter->stats.latecol;
3943 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3945 /* Tx Dropped needs to be maintained elsewhere */
3947 /* Phy Stats */
3948 if (hw->phy.media_type == e1000_media_type_copper) {
3949 if ((adapter->link_speed == SPEED_1000) &&
3950 (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3951 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3952 adapter->phy_stats.idle_errors += phy_tmp;
3956 /* Management Stats */
3957 adapter->stats.mgptc += rd32(E1000_MGTPTC);
3958 adapter->stats.mgprc += rd32(E1000_MGTPRC);
3959 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
3962 static irqreturn_t igb_msix_other(int irq, void *data)
3964 struct igb_adapter *adapter = data;
3965 struct e1000_hw *hw = &adapter->hw;
3966 u32 icr = rd32(E1000_ICR);
3967 /* reading ICR causes bit 31 of EICR to be cleared */
3969 if (icr & E1000_ICR_DOUTSYNC) {
3970 /* HW is reporting DMA is out of sync */
3971 adapter->stats.doosync++;
3974 /* Check for a mailbox event */
3975 if (icr & E1000_ICR_VMMB)
3976 igb_msg_task(adapter);
3978 if (icr & E1000_ICR_LSC) {
3979 hw->mac.get_link_status = 1;
3980 /* guard against interrupt when we're going down */
3981 if (!test_bit(__IGB_DOWN, &adapter->state))
3982 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3985 wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_VMMB);
3986 wr32(E1000_EIMS, adapter->eims_other);
3988 return IRQ_HANDLED;
3991 static void igb_write_itr(struct igb_q_vector *q_vector)
3993 u32 itr_val = q_vector->itr_val & 0x7FFC;
3995 if (!q_vector->set_itr)
3996 return;
3998 if (!itr_val)
3999 itr_val = 0x4;
4001 if (q_vector->itr_shift)
4002 itr_val |= itr_val << q_vector->itr_shift;
4003 else
4004 itr_val |= 0x8000000;
4006 writel(itr_val, q_vector->itr_register);
4007 q_vector->set_itr = 0;
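/* Example: itr_val = 0x3B6 is masked with 0x7FFC down to 0x3B4 (the low
 * two bits are reserved), and a zero interval is bumped to the minimum
 * of 0x4.  With a non-zero itr_shift the masked value is mirrored into
 * the upper field as well, e.g. 0x3B4 | (0x3B4 << 16) for a shift of
 * 16; otherwise bit 27 (0x8000000) is OR'd in, as above.
 */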
4010 static irqreturn_t igb_msix_ring(int irq, void *data)
4012 struct igb_q_vector *q_vector = data;
4014 /* Write the ITR value calculated from the previous interrupt. */
4015 igb_write_itr(q_vector);
4017 napi_schedule(&q_vector->napi);
4019 return IRQ_HANDLED;
4022 #ifdef CONFIG_IGB_DCA
4023 static void igb_update_dca(struct igb_q_vector *q_vector)
4025 struct igb_adapter *adapter = q_vector->adapter;
4026 struct e1000_hw *hw = &adapter->hw;
4027 int cpu = get_cpu();
4029 if (q_vector->cpu == cpu)
4030 goto out_no_update;
4032 if (q_vector->tx_ring) {
4033 int q = q_vector->tx_ring->reg_idx;
4034 u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
4035 if (hw->mac.type == e1000_82575) {
4036 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
4037 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4038 } else {
4039 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
4040 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4041 E1000_DCA_TXCTRL_CPUID_SHIFT;
4043 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
4044 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
4046 if (q_vector->rx_ring) {
4047 int q = q_vector->rx_ring->reg_idx;
4048 u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
4049 if (hw->mac.type == e1000_82575) {
4050 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
4051 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4052 } else {
4053 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
4054 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4055 E1000_DCA_RXCTRL_CPUID_SHIFT;
4057 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
4058 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
4059 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
4060 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
4062 q_vector->cpu = cpu;
4063 out_no_update:
4064 put_cpu();
4067 static void igb_setup_dca(struct igb_adapter *adapter)
4069 struct e1000_hw *hw = &adapter->hw;
4070 int i;
4072 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
4073 return;
4075 /* Always use CB2 mode, difference is masked in the CB driver. */
4076 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
4078 for (i = 0; i < adapter->num_q_vectors; i++) {
4079 struct igb_q_vector *q_vector = adapter->q_vector[i];
4080 q_vector->cpu = -1;
4081 igb_update_dca(q_vector);
4085 static int __igb_notify_dca(struct device *dev, void *data)
4087 struct net_device *netdev = dev_get_drvdata(dev);
4088 struct igb_adapter *adapter = netdev_priv(netdev);
4089 struct e1000_hw *hw = &adapter->hw;
4090 unsigned long event = *(unsigned long *)data;
4092 switch (event) {
4093 case DCA_PROVIDER_ADD:
4094 /* if already enabled, don't do it again */
4095 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
4096 break;
4097 /* Always use CB2 mode, difference is masked
4098 * in the CB driver. */
4099 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
4100 if (dca_add_requester(dev) == 0) {
4101 adapter->flags |= IGB_FLAG_DCA_ENABLED;
4102 dev_info(&adapter->pdev->dev, "DCA enabled\n");
4103 igb_setup_dca(adapter);
4104 break;
4106 /* Fall Through since DCA is disabled. */
4107 case DCA_PROVIDER_REMOVE:
4108 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
4109 /* without this a class_device is left
4110 * hanging around in the sysfs model */
4111 dca_remove_requester(dev);
4112 dev_info(&adapter->pdev->dev, "DCA disabled\n");
4113 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
4114 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
4116 break;
4119 return 0;
4122 static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
4123 void *p)
4125 int ret_val;
4127 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
4128 __igb_notify_dca);
4130 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
4132 #endif /* CONFIG_IGB_DCA */
4134 static void igb_ping_all_vfs(struct igb_adapter *adapter)
4136 struct e1000_hw *hw = &adapter->hw;
4137 u32 ping;
4138 int i;
4140 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
4141 ping = E1000_PF_CONTROL_MSG;
4142 if (adapter->vf_data[i].clear_to_send)
4143 ping |= E1000_VT_MSGTYPE_CTS;
4144 igb_write_mbx(hw, &ping, 1, i);
4148 static int igb_set_vf_multicasts(struct igb_adapter *adapter,
4149 u32 *msgbuf, u32 vf)
4151 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
4152 u16 *hash_list = (u16 *)&msgbuf[1];
4153 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4154 int i;
4156 /* only up to 30 hash values supported */
4157 if (n > 30)
4158 n = 30;
4160 /* salt away the number of multicast addresses assigned
4161 * to this VF for later use, to restore when the PF multicast
4162 * list changes
4164 vf_data->num_vf_mc_hashes = n;
4166 /* VFs are limited to using the MTA hash table for their multicast
4167 * addresses */
4168 for (i = 0; i < n; i++)
4169 vf_data->vf_mc_hashes[i] = hash_list[i];
4171 /* Flush and reset the mta with the new values */
4172 igb_set_rx_mode(adapter->netdev);
4174 return 0;
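/* The multicast request is laid out by the VF roughly as below; a
 * sketch, where mta_hash_of() stands in for the VF driver's actual
 * hash computation:
 *
 *	u16 *hash_list = (u16 *)&msgbuf[1];
 *
 *	msgbuf[0] = E1000_VF_SET_MULTICAST | (n << E1000_VT_MSGINFO_SHIFT);
 *	for (i = 0; i < n; i++)
 *		hash_list[i] = mta_hash_of(mc_addr[i]);
 */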
4177 static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
4179 struct e1000_hw *hw = &adapter->hw;
4180 struct vf_data_storage *vf_data;
4181 int i, j;
4183 for (i = 0; i < adapter->vfs_allocated_count; i++) {
4184 vf_data = &adapter->vf_data[i];
4185 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4186 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
4190 static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
4192 struct e1000_hw *hw = &adapter->hw;
4193 u32 pool_mask, reg, vid;
4194 int i;
4196 pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
4198 /* Find the vlan filter for this id */
4199 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
4200 reg = rd32(E1000_VLVF(i));
4202 /* remove the vf from the pool */
4203 reg &= ~pool_mask;
4205 /* if pool is empty then remove entry from vfta */
4206 if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
4207 (reg & E1000_VLVF_VLANID_ENABLE)) {
4208 vid = reg & E1000_VLVF_VLANID_MASK;
4209 reg = 0;
4210 igb_vfta_set(hw, vid, false);
4213 wr32(E1000_VLVF(i), reg);
4216 adapter->vf_data[vf].vlans_enabled = 0;
4219 static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
4221 struct e1000_hw *hw = &adapter->hw;
4222 u32 reg, i;
4224 /* It is an error to call this function when VFs are not enabled */
4225 if (!adapter->vfs_allocated_count)
4226 return -1;
4228 /* Find the vlan filter for this id */
4229 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
4230 reg = rd32(E1000_VLVF(i));
4231 if ((reg & E1000_VLVF_VLANID_ENABLE) &&
4232 vid == (reg & E1000_VLVF_VLANID_MASK))
4233 break;
4236 if (add) {
4237 if (i == E1000_VLVF_ARRAY_SIZE) {
4238 /* Did not find a matching VLAN ID entry that was
4239 * enabled. Search for a free filter entry, i.e.
4240 * one without the enable bit set
4242 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
4243 reg = rd32(E1000_VLVF(i));
4244 if (!(reg & E1000_VLVF_VLANID_ENABLE))
4245 break;
4248 if (i < E1000_VLVF_ARRAY_SIZE) {
4249 /* Found an enabled/available entry */
4250 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
4252 /* if !enabled we need to set this up in vfta */
4253 if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
4254 /* add VID to filter table; if the bit is already set
4255 * the PF must have added it outside of the table */
4256 if (igb_vfta_set(hw, vid, true))
4257 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT +
4258 adapter->vfs_allocated_count);
4259 reg |= E1000_VLVF_VLANID_ENABLE;
4261 reg &= ~E1000_VLVF_VLANID_MASK;
4262 reg |= vid;
4264 wr32(E1000_VLVF(i), reg);
4266 /* do not modify RLPML for PF devices */
4267 if (vf >= adapter->vfs_allocated_count)
4268 return 0;
4270 if (!adapter->vf_data[vf].vlans_enabled) {
4271 u32 size;
4272 reg = rd32(E1000_VMOLR(vf));
4273 size = reg & E1000_VMOLR_RLPML_MASK;
4274 size += 4;
4275 reg &= ~E1000_VMOLR_RLPML_MASK;
4276 reg |= size;
4277 wr32(E1000_VMOLR(vf), reg);
4279 adapter->vf_data[vf].vlans_enabled++;
4281 return 0;
4283 } else {
4284 if (i < E1000_VLVF_ARRAY_SIZE) {
4285 /* remove vf from the pool */
4286 reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
4287 /* if pool is empty then remove entry from vfta */
4288 if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
4289 reg = 0;
4290 igb_vfta_set(hw, vid, false);
4292 wr32(E1000_VLVF(i), reg);
4294 /* do not modify RLPML for PF devices */
4295 if (vf >= adapter->vfs_allocated_count)
4296 return 0;
4298 adapter->vf_data[vf].vlans_enabled--;
4299 if (!adapter->vf_data[vf].vlans_enabled) {
4300 u32 size;
4301 reg = rd32(E1000_VMOLR(vf));
4302 size = reg & E1000_VMOLR_RLPML_MASK;
4303 size -= 4;
4304 reg &= ~E1000_VMOLR_RLPML_MASK;
4305 reg |= size;
4306 wr32(E1000_VMOLR(vf), reg);
4308 return 0;
4311 return -1;
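/* Typical PF-side usage (see igb_vlan_rx_add_vid below): try the shared
 * VLVF pool first, and fall back to a plain VFTA entry if no slot is
 * available:
 *
 *	if (igb_vlvf_set(adapter, vid, true, pf_id))
 *		igb_vfta_set(hw, vid, true);
 */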
4314 static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4316 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
4317 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
4319 return igb_vlvf_set(adapter, vid, add, vf);
4322 static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
4324 struct e1000_hw *hw = &adapter->hw;
4326 /* disable mailbox functionality for vf */
4327 adapter->vf_data[vf].clear_to_send = false;
4329 /* reset offloads to defaults */
4330 igb_set_vmolr(hw, vf);
4332 /* reset vlans for device */
4333 igb_clear_vf_vfta(adapter, vf);
4335 /* reset multicast table array for vf */
4336 adapter->vf_data[vf].num_vf_mc_hashes = 0;
4338 /* Flush and reset the mta with the new values */
4339 igb_set_rx_mode(adapter->netdev);
4342 static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
4344 struct e1000_hw *hw = &adapter->hw;
4345 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
4346 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
4347 u32 reg, msgbuf[3];
4348 u8 *addr = (u8 *)(&msgbuf[1]);
4350 /* process all the same items cleared in a function level reset */
4351 igb_vf_reset_event(adapter, vf);
4353 /* set vf mac address */
4354 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
4356 /* enable transmit and receive for vf */
4357 reg = rd32(E1000_VFTE);
4358 wr32(E1000_VFTE, reg | (1 << vf));
4359 reg = rd32(E1000_VFRE);
4360 wr32(E1000_VFRE, reg | (1 << vf));
4362 /* enable mailbox functionality for vf */
4363 adapter->vf_data[vf].clear_to_send = true;
4365 /* reply to reset with ack and vf mac address */
4366 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
4367 memcpy(addr, vf_mac, 6);
4368 igb_write_mbx(hw, msgbuf, 3, vf);
4371 static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
4373 unsigned char *addr = (char *)&msg[1];
4374 int err = -1;
4376 if (is_valid_ether_addr(addr))
4377 err = igb_set_vf_mac(adapter, vf, addr);
4379 return err;
4383 static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
4385 struct e1000_hw *hw = &adapter->hw;
4386 u32 msg = E1000_VT_MSGTYPE_NACK;
4388 /* if device isn't clear to send it shouldn't be reading either */
4389 if (!adapter->vf_data[vf].clear_to_send)
4390 igb_write_mbx(hw, &msg, 1, vf);
4394 static void igb_msg_task(struct igb_adapter *adapter)
4396 struct e1000_hw *hw = &adapter->hw;
4397 u32 vf;
4399 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
4400 /* process any reset requests */
4401 if (!igb_check_for_rst(hw, vf)) {
4402 adapter->vf_data[vf].clear_to_send = false;
4403 igb_vf_reset_event(adapter, vf);
4406 /* process any messages pending */
4407 if (!igb_check_for_msg(hw, vf))
4408 igb_rcv_msg_from_vf(adapter, vf);
4410 /* process any acks */
4411 if (!igb_check_for_ack(hw, vf))
4412 igb_rcv_ack_from_vf(adapter, vf);
4417 static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4419 u32 mbx_size = E1000_VFMAILBOX_SIZE;
4420 u32 msgbuf[mbx_size];
4421 struct e1000_hw *hw = &adapter->hw;
4422 s32 retval;
4424 retval = igb_read_mbx(hw, msgbuf, mbx_size, vf);
4426 if (retval)
4427 dev_err(&adapter->pdev->dev,
4428 "Error receiving message from VF\n");
4430 /* this is a message we already processed, do nothing */
4431 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
4432 return retval;
4435 * until the vf completes a reset it should not be
4436 * allowed to start any configuration.
4439 if (msgbuf[0] == E1000_VF_RESET) {
4440 igb_vf_reset_msg(adapter, vf);
4442 return retval;
4445 if (!adapter->vf_data[vf].clear_to_send) {
4446 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
4447 igb_write_mbx(hw, msgbuf, 1, vf);
4448 return retval;
4451 switch ((msgbuf[0] & 0xFFFF)) {
4452 case E1000_VF_SET_MAC_ADDR:
4453 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
4454 break;
4455 case E1000_VF_SET_MULTICAST:
4456 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
4457 break;
4458 case E1000_VF_SET_LPE:
4459 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
4460 break;
4461 case E1000_VF_SET_VLAN:
4462 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
4463 break;
4464 default:
4465 dev_err(&adapter->pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
4466 retval = -1;
4467 break;
4470 /* notify the VF of the results of what it sent us */
4471 if (retval)
4472 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
4473 else
4474 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
4476 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
4478 igb_write_mbx(hw, msgbuf, 1, vf);
4480 return retval;
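/* Mailbox word 0 carries the message type in its low 16 bits and
 * status flags in the upper bits, so a failed request comes back to
 * the VF looking roughly like:
 *
 *	msgbuf[0] = E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK |
 *		    E1000_VT_MSGTYPE_CTS;
 *
 * CTS is only included once the VF has completed a reset, per the
 * clear_to_send check above.
 */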
4484 * igb_set_uta - Set unicast filter table address
4485 * @adapter: board private structure
4487 * The unicast table address is a register array of 32-bit registers.
4488 * The table is meant to be used in a way similar to how the MTA is used;
4489 * however, due to certain limitations in the hardware it is necessary to
4490 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
4491 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
4493 static void igb_set_uta(struct igb_adapter *adapter)
4495 struct e1000_hw *hw = &adapter->hw;
4496 int i;
4498 /* The UTA table only exists on 82576 hardware and newer */
4499 if (hw->mac.type < e1000_82576)
4500 return;
4502 /* we only need to do this if VMDq is enabled */
4503 if (!adapter->vfs_allocated_count)
4504 return;
4506 for (i = 0; i < hw->mac.uta_reg_count; i++)
4507 array_wr32(E1000_UTA, i, ~0);
4511 * igb_intr_msi - Interrupt Handler
4512 * @irq: interrupt number
4513 * @data: pointer to a network interface device structure
4515 static irqreturn_t igb_intr_msi(int irq, void *data)
4517 struct igb_adapter *adapter = data;
4518 struct igb_q_vector *q_vector = adapter->q_vector[0];
4519 struct e1000_hw *hw = &adapter->hw;
4520 /* read ICR disables interrupts using IAM */
4521 u32 icr = rd32(E1000_ICR);
4523 igb_write_itr(q_vector);
4525 if (icr & E1000_ICR_DOUTSYNC) {
4526 /* HW is reporting DMA is out of sync */
4527 adapter->stats.doosync++;
4530 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
4531 hw->mac.get_link_status = 1;
4532 if (!test_bit(__IGB_DOWN, &adapter->state))
4533 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4536 napi_schedule(&q_vector->napi);
4538 return IRQ_HANDLED;
4542 * igb_intr - Legacy Interrupt Handler
4543 * @irq: interrupt number
4544 * @data: pointer to a network interface device structure
4546 static irqreturn_t igb_intr(int irq, void *data)
4548 struct igb_adapter *adapter = data;
4549 struct igb_q_vector *q_vector = adapter->q_vector[0];
4550 struct e1000_hw *hw = &adapter->hw;
4551 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
4552 * need for the IMC write */
4553 u32 icr = rd32(E1000_ICR);
4554 if (!icr)
4555 return IRQ_NONE; /* Not our interrupt */
4557 igb_write_itr(q_vector);
4559 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
4560 * not set, then the adapter didn't send an interrupt */
4561 if (!(icr & E1000_ICR_INT_ASSERTED))
4562 return IRQ_NONE;
4564 if (icr & E1000_ICR_DOUTSYNC) {
4565 /* HW is reporting DMA is out of sync */
4566 adapter->stats.doosync++;
4569 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
4570 hw->mac.get_link_status = 1;
4571 /* guard against interrupt when we're going down */
4572 if (!test_bit(__IGB_DOWN, &adapter->state))
4573 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4576 napi_schedule(&q_vector->napi);
4578 return IRQ_HANDLED;
4581 static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
4583 struct igb_adapter *adapter = q_vector->adapter;
4584 struct e1000_hw *hw = &adapter->hw;
4586 if (adapter->itr_setting & 3) {
4587 if (!adapter->msix_entries)
4588 igb_set_itr(adapter);
4589 else
4590 igb_update_ring_itr(q_vector);
4593 if (!test_bit(__IGB_DOWN, &adapter->state)) {
4594 if (adapter->msix_entries)
4595 wr32(E1000_EIMS, q_vector->eims_value);
4596 else
4597 igb_irq_enable(adapter);
4602 * igb_poll - NAPI Rx polling callback
4603 * @napi: napi polling structure
4604 * @budget: count of how many packets we should handle
4606 static int igb_poll(struct napi_struct *napi, int budget)
4608 struct igb_q_vector *q_vector = container_of(napi,
4609 struct igb_q_vector,
4610 napi);
4611 int tx_clean_complete = 1, work_done = 0;
4613 #ifdef CONFIG_IGB_DCA
4614 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
4615 igb_update_dca(q_vector);
4616 #endif
4617 if (q_vector->tx_ring)
4618 tx_clean_complete = igb_clean_tx_irq(q_vector);
4620 if (q_vector->rx_ring)
4621 igb_clean_rx_irq_adv(q_vector, &work_done, budget);
4623 if (!tx_clean_complete)
4624 work_done = budget;
4626 /* If not enough Rx work done, exit the polling mode */
4627 if (work_done < budget) {
4628 napi_complete(napi);
4629 igb_ring_irq_enable(q_vector);
4632 return work_done;
4636 * igb_hwtstamp - utility function which checks for TX time stamp
4637 * @adapter: board private structure
4638 * @skb: packet that was just sent
4640 * If we were asked to do hardware stamping and such a time stamp is
4641 * available, then it must have been for this skb here because we
4642 * allow only one such packet into the queue.
4644 static void igb_tx_hwtstamp(struct igb_adapter *adapter, struct sk_buff *skb)
4646 union skb_shared_tx *shtx = skb_tx(skb);
4647 struct e1000_hw *hw = &adapter->hw;
4649 if (unlikely(shtx->hardware)) {
4650 u32 valid = rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID;
4651 if (valid) {
4652 u64 regval = rd32(E1000_TXSTMPL);
4653 u64 ns;
4654 struct skb_shared_hwtstamps shhwtstamps;
4656 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
4657 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
4658 ns = timecounter_cyc2time(&adapter->clock,
4659 regval);
4660 timecompare_update(&adapter->compare, ns);
4661 shhwtstamps.hwtstamp = ns_to_ktime(ns);
4662 shhwtstamps.syststamp =
4663 timecompare_transform(&adapter->compare, ns);
4664 skb_tstamp_tx(skb, &shhwtstamps);
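/* A transmitting application opts in to this path with SO_TIMESTAMPING;
 * a minimal userspace sketch:
 *
 *	int flags = SOF_TIMESTAMPING_TX_HARDWARE |
 *		    SOF_TIMESTAMPING_SYS_HARDWARE |
 *		    SOF_TIMESTAMPING_RAW_HARDWARE;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags));
 *
 * The stamps generated here are then delivered to the socket's error
 * queue along with a copy of the original packet.
 */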
4670 * igb_clean_tx_irq - Reclaim resources after transmit completes
4671 * @q_vector: pointer to q_vector containing needed info
4672 * returns true if ring is completely cleaned
4674 static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
4676 struct igb_adapter *adapter = q_vector->adapter;
4677 struct igb_ring *tx_ring = q_vector->tx_ring;
4678 struct net_device *netdev = adapter->netdev;
4679 struct e1000_hw *hw = &adapter->hw;
4680 struct igb_buffer *buffer_info;
4681 struct sk_buff *skb;
4682 union e1000_adv_tx_desc *tx_desc, *eop_desc;
4683 unsigned int total_bytes = 0, total_packets = 0;
4684 unsigned int i, eop, count = 0;
4685 bool cleaned = false;
4687 i = tx_ring->next_to_clean;
4688 eop = tx_ring->buffer_info[i].next_to_watch;
4689 eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
4691 while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
4692 (count < tx_ring->count)) {
4693 for (cleaned = false; !cleaned; count++) {
4694 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
4695 buffer_info = &tx_ring->buffer_info[i];
4696 cleaned = (i == eop);
4697 skb = buffer_info->skb;
4699 if (skb) {
4700 unsigned int segs, bytecount;
4701 /* gso_segs is currently only valid for tcp */
4702 segs = skb_shinfo(skb)->gso_segs ?: 1;
4703 /* multiply data chunks by size of headers */
4704 bytecount = ((segs - 1) * skb_headlen(skb)) +
4705 skb->len;
4706 total_packets += segs;
4707 total_bytes += bytecount;
4709 igb_tx_hwtstamp(adapter, skb);
4712 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
4713 tx_desc->wb.status = 0;
4715 i++;
4716 if (i == tx_ring->count)
4717 i = 0;
4719 eop = tx_ring->buffer_info[i].next_to_watch;
4720 eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
4723 tx_ring->next_to_clean = i;
4725 if (unlikely(count &&
4726 netif_carrier_ok(netdev) &&
4727 igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
4728 /* Make sure that anybody stopping the queue after this
4729 * sees the new next_to_clean.
4731 smp_mb();
4732 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
4733 !(test_bit(__IGB_DOWN, &adapter->state))) {
4734 netif_wake_subqueue(netdev, tx_ring->queue_index);
4735 tx_ring->tx_stats.restart_queue++;
4739 if (tx_ring->detect_tx_hung) {
4740 /* Detect a transmit hang in hardware; this serializes the
4741 * check with the clearing of time_stamp and movement of i */
4742 tx_ring->detect_tx_hung = false;
4743 if (tx_ring->buffer_info[i].time_stamp &&
4744 time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
4745 (adapter->tx_timeout_factor * HZ))
4746 && !(rd32(E1000_STATUS) &
4747 E1000_STATUS_TXOFF)) {
4749 /* detected Tx unit hang */
4750 dev_err(&tx_ring->pdev->dev,
4751 "Detected Tx Unit Hang\n"
4752 " Tx Queue <%d>\n"
4753 " TDH <%x>\n"
4754 " TDT <%x>\n"
4755 " next_to_use <%x>\n"
4756 " next_to_clean <%x>\n"
4757 "buffer_info[next_to_clean]\n"
4758 " time_stamp <%lx>\n"
4759 " next_to_watch <%x>\n"
4760 " jiffies <%lx>\n"
4761 " desc.status <%x>\n",
4762 tx_ring->queue_index,
4763 readl(tx_ring->head),
4764 readl(tx_ring->tail),
4765 tx_ring->next_to_use,
4766 tx_ring->next_to_clean,
4767 tx_ring->buffer_info[i].time_stamp,
4768 eop,
4769 jiffies,
4770 eop_desc->wb.status);
4771 netif_stop_subqueue(netdev, tx_ring->queue_index);
4774 tx_ring->total_bytes += total_bytes;
4775 tx_ring->total_packets += total_packets;
4776 tx_ring->tx_stats.bytes += total_bytes;
4777 tx_ring->tx_stats.packets += total_packets;
4778 netdev->stats.tx_bytes += total_bytes;
4779 netdev->stats.tx_packets += total_packets;
4780 return (count < tx_ring->count);
4784 * igb_receive_skb - helper function to handle rx indications
4785 * @q_vector: structure containing interrupt and ring information
4786 * @skb: packet to send up
4787 * @vlan_tag: vlan tag for packet
4789 static void igb_receive_skb(struct igb_q_vector *q_vector,
4790 struct sk_buff *skb,
4791 u16 vlan_tag)
4793 struct igb_adapter *adapter = q_vector->adapter;
4795 if (vlan_tag)
4796 vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
4797 vlan_tag, skb);
4798 else
4799 napi_gro_receive(&q_vector->napi, skb);
4802 static inline void igb_rx_checksum_adv(struct igb_ring *ring,
4803 struct igb_adapter *adapter,
4804 u32 status_err, struct sk_buff *skb)
4806 skb->ip_summed = CHECKSUM_NONE;
4808 /* Ignore Checksum bit is set or checksum is disabled through ethtool */
4809 if ((status_err & E1000_RXD_STAT_IXSM) ||
4810 (adapter->flags & IGB_FLAG_RX_CSUM_DISABLED))
4811 return;
4812 /* TCP/UDP checksum error bit is set */
4813 if (status_err &
4814 (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
4816 * work around errata with sctp packets where the TCPE aka
4817 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
4818 * packets (i.e. let the stack check the crc32c)
4820 if (!((adapter->hw.mac.type == e1000_82576) &&
4821 (skb->len == 60)))
4822 ring->rx_stats.csum_err++;
4823 /* let the stack verify checksum errors */
4824 return;
4826 /* It must be a TCP or UDP packet with a valid checksum */
4827 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
4828 skb->ip_summed = CHECKSUM_UNNECESSARY;
4830 dev_dbg(&adapter->pdev->dev, "cksum success: bits %08X\n", status_err);
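/* csum_err is kept per rx ring; where a global count is wanted it can
 * be derived by summing over the rings, e.g. (a sketch):
 *
 *	u64 csum_err = 0;
 *	int i;
 *
 *	for (i = 0; i < adapter->num_rx_queues; i++)
 *		csum_err += adapter->rx_ring[i].rx_stats.csum_err;
 */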
4833 static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
4834 union e1000_adv_rx_desc *rx_desc)
4836 /* HW will not DMA in data larger than the given buffer, even if it
4837 * parses the (NFS, of course) header to be larger. In that case, it
4838 * fills the header buffer and spills the rest into the page.
4840 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
4841 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
4842 if (hlen > rx_ring->rx_buffer_len)
4843 hlen = rx_ring->rx_buffer_len;
4844 return hlen;
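/* Example: with a jumbo MTU the ring uses 128-byte header buffers, so
 * a frame whose parsed header is reported as 54 bytes yields hlen = 54
 * and the payload is taken from the page half; a bogus hdr_info value
 * larger than rx_buffer_len is clamped to the buffer size.
 */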
4847 static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
4848 int *work_done, int budget)
4850 struct igb_adapter *adapter = q_vector->adapter;
4851 struct net_device *netdev = adapter->netdev;
4852 struct igb_ring *rx_ring = q_vector->rx_ring;
4853 struct e1000_hw *hw = &adapter->hw;
4854 struct pci_dev *pdev = rx_ring->pdev;
4855 union e1000_adv_rx_desc *rx_desc , *next_rxd;
4856 struct igb_buffer *buffer_info , *next_buffer;
4857 struct sk_buff *skb;
4858 bool cleaned = false;
4859 int cleaned_count = 0;
4860 unsigned int total_bytes = 0, total_packets = 0;
4861 unsigned int i;
4862 u32 staterr;
4863 u16 length;
4864 u16 vlan_tag;
4866 i = rx_ring->next_to_clean;
4867 buffer_info = &rx_ring->buffer_info[i];
4868 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
4869 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
4871 while (staterr & E1000_RXD_STAT_DD) {
4872 if (*work_done >= budget)
4873 break;
4874 (*work_done)++;
4876 skb = buffer_info->skb;
4877 prefetch(skb->data - NET_IP_ALIGN);
4878 buffer_info->skb = NULL;
4880 i++;
4881 if (i == rx_ring->count)
4882 i = 0;
4883 next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
4884 prefetch(next_rxd);
4885 next_buffer = &rx_ring->buffer_info[i];
4887 length = le16_to_cpu(rx_desc->wb.upper.length);
4888 cleaned = true;
4889 cleaned_count++;
4891 if (buffer_info->dma) {
4892 pci_unmap_single(pdev, buffer_info->dma,
4893 rx_ring->rx_buffer_len,
4894 PCI_DMA_FROMDEVICE);
4895 buffer_info->dma = 0;
4896 if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
4897 skb_put(skb, length);
4898 goto send_up;
4900 skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
4903 if (length) {
4904 pci_unmap_page(pdev, buffer_info->page_dma,
4905 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
4906 buffer_info->page_dma = 0;
4908 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
4909 buffer_info->page,
4910 buffer_info->page_offset,
4911 length);
4913 if (page_count(buffer_info->page) != 1)
4914 buffer_info->page = NULL;
4915 else
4916 get_page(buffer_info->page);
4918 skb->len += length;
4919 skb->data_len += length;
4921 skb->truesize += length;
4924 if (!(staterr & E1000_RXD_STAT_EOP)) {
4925 buffer_info->skb = next_buffer->skb;
4926 buffer_info->dma = next_buffer->dma;
4927 next_buffer->skb = skb;
4928 next_buffer->dma = 0;
4929 goto next_desc;
4931 send_up:
4933 * If this bit is set, then the RX registers contain
4934 * the time stamp. No other packet will be time
4935 * stamped until we read these registers, so read the
4936 * registers to make them available again. Because
4937 * only one packet can be time stamped at a time, we
4938 * know that the register values must belong to this
4939 * one here and therefore we don't need to compare
4940 * any of the additional attributes stored for it.
4942 * If nothing went wrong, then it should have a
4943 * skb_shared_tx that we can turn into a
4944 * skb_shared_hwtstamps.
4946 * TODO: can time stamping be triggered (thus locking
4947 * the registers) without the packet reaching this point
4948 * here? In that case RX time stamping would get stuck.
4950 * TODO: in "time stamp all packets" mode this bit is
4951 * not set. Need a global flag for this mode and then
4952 * always read the registers. Cannot be done without
4953 * a race condition.
4955 if (unlikely(staterr & E1000_RXD_STAT_TS)) {
4956 u64 regval;
4957 u64 ns;
4958 struct skb_shared_hwtstamps *shhwtstamps =
4959 skb_hwtstamps(skb);
4961 WARN(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID),
4962 "igb: no RX time stamp available for time stamped packet");
4963 regval = rd32(E1000_RXSTMPL);
4964 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
4965 ns = timecounter_cyc2time(&adapter->clock, regval);
4966 timecompare_update(&adapter->compare, ns);
4967 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
4968 shhwtstamps->hwtstamp = ns_to_ktime(ns);
4969 shhwtstamps->syststamp =
4970 timecompare_transform(&adapter->compare, ns);
4973 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
4974 dev_kfree_skb_irq(skb);
4975 goto next_desc;
4978 total_bytes += skb->len;
4979 total_packets++;
4981 igb_rx_checksum_adv(rx_ring, adapter, staterr, skb);
4983 skb->protocol = eth_type_trans(skb, netdev);
4984 skb_record_rx_queue(skb, rx_ring->queue_index);
4986 vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
4987 le16_to_cpu(rx_desc->wb.upper.vlan) : 0);
4989 igb_receive_skb(q_vector, skb, vlan_tag);
4991 next_desc:
4992 rx_desc->wb.upper.status_error = 0;
4994 /* return some buffers to hardware, one at a time is too slow */
4995 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
4996 igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
4997 cleaned_count = 0;
5000 /* use prefetched values */
5001 rx_desc = next_rxd;
5002 buffer_info = next_buffer;
5003 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
5006 rx_ring->next_to_clean = i;
5007 cleaned_count = igb_desc_unused(rx_ring);
5009 if (cleaned_count)
5010 igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
5012 rx_ring->total_packets += total_packets;
5013 rx_ring->total_bytes += total_bytes;
5014 rx_ring->rx_stats.packets += total_packets;
5015 rx_ring->rx_stats.bytes += total_bytes;
5016 netdev->stats.rx_bytes += total_bytes;
5017 netdev->stats.rx_packets += total_packets;
5018 return cleaned;
5022 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
5023 * @adapter: address of board private structure
5025 static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
5026 int cleaned_count)
5028 struct igb_adapter *adapter = rx_ring->q_vector->adapter;
5029 struct net_device *netdev = adapter->netdev;
5030 union e1000_adv_rx_desc *rx_desc;
5031 struct igb_buffer *buffer_info;
5032 struct sk_buff *skb;
5033 unsigned int i;
5034 int bufsz;
5036 i = rx_ring->next_to_use;
5037 buffer_info = &rx_ring->buffer_info[i];
5039 bufsz = rx_ring->rx_buffer_len;
5041 while (cleaned_count--) {
5042 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
5044 if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
5045 if (!buffer_info->page) {
5046 buffer_info->page = alloc_page(GFP_ATOMIC);
5047 if (!buffer_info->page) {
5048 rx_ring->rx_stats.alloc_failed++;
5049 goto no_buffers;
5051 buffer_info->page_offset = 0;
5052 } else {
5053 buffer_info->page_offset ^= PAGE_SIZE / 2;
5055 buffer_info->page_dma =
5056 pci_map_page(rx_ring->pdev, buffer_info->page,
5057 buffer_info->page_offset,
5058 PAGE_SIZE / 2,
5059 PCI_DMA_FROMDEVICE);
5062 if (!buffer_info->skb) {
5063 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
5064 if (!skb) {
5065 rx_ring->rx_stats.alloc_failed++;
5066 goto no_buffers;
5069 buffer_info->skb = skb;
5070 buffer_info->dma = pci_map_single(rx_ring->pdev,
5071 skb->data,
5072 bufsz,
5073 PCI_DMA_FROMDEVICE);
5075 /* Refresh the desc even if buffer_addrs didn't change because
5076 * each write-back erases this info. */
5077 if (bufsz < IGB_RXBUFFER_1024) {
5078 rx_desc->read.pkt_addr =
5079 cpu_to_le64(buffer_info->page_dma);
5080 rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
5081 } else {
5082 rx_desc->read.pkt_addr =
5083 cpu_to_le64(buffer_info->dma);
5084 rx_desc->read.hdr_addr = 0;
5087 i++;
5088 if (i == rx_ring->count)
5089 i = 0;
5090 buffer_info = &rx_ring->buffer_info[i];
5093 no_buffers:
5094 if (rx_ring->next_to_use != i) {
5095 rx_ring->next_to_use = i;
5096 if (i == 0)
5097 i = (rx_ring->count - 1);
5098 else
5099 i--;
5101 /* Force memory writes to complete before letting h/w
5102 * know there are new descriptors to fetch. (Only
5103 * applicable for weak-ordered memory model archs,
5104 * such as IA-64). */
5105 wmb();
5106 writel(i, rx_ring->tail);
5111 * igb_mii_ioctl -
5112 * @netdev:
5113 * @ifreq:
5114 * @cmd:
5116 static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
5118 struct igb_adapter *adapter = netdev_priv(netdev);
5119 struct mii_ioctl_data *data = if_mii(ifr);
5121 if (adapter->hw.phy.media_type != e1000_media_type_copper)
5122 return -EOPNOTSUPP;
5124 switch (cmd) {
5125 case SIOCGMIIPHY:
5126 data->phy_id = adapter->hw.phy.addr;
5127 break;
5128 case SIOCGMIIREG:
5129 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
5130 &data->val_out))
5131 return -EIO;
5132 break;
5133 case SIOCSMIIREG:
5134 default:
5135 return -EOPNOTSUPP;
5137 return 0;
5141 * igb_hwtstamp_ioctl - control hardware time stamping
5142 * @netdev:
5143 * @ifreq:
5144 * @cmd:
5146 * Outgoing time stamping can be enabled and disabled. Play nice and
5147 * disable it when requested, although it shouldn't cause any overhead
5148 * when no packet needs it. At most one packet in the queue may be
5149 * marked for time stamping, otherwise it would be impossible to tell
5150 * for sure to which packet the hardware time stamp belongs.
5152 * Incoming time stamping has to be configured via the hardware
5153 * filters. Not all combinations are supported, in particular event
5154 * type has to be specified. Matching the kind of event packet is
5155 * not supported, with the exception of "all V2 events regardless of
5156 * level 2 or 4".
5159 static int igb_hwtstamp_ioctl(struct net_device *netdev,
5160 struct ifreq *ifr, int cmd)
5162 struct igb_adapter *adapter = netdev_priv(netdev);
5163 struct e1000_hw *hw = &adapter->hw;
5164 struct hwtstamp_config config;
5165 u32 tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
5166 u32 tsync_rx_ctl_bit = E1000_TSYNCRXCTL_ENABLED;
5167 u32 tsync_rx_ctl_type = 0;
5168 u32 tsync_rx_cfg = 0;
5169 int is_l4 = 0;
5170 int is_l2 = 0;
5171 short port = 319; /* PTP */
5172 u32 regval;
5174 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
5175 return -EFAULT;
5177 /* reserved for future extensions */
5178 if (config.flags)
5179 return -EINVAL;
5181 switch (config.tx_type) {
5182 case HWTSTAMP_TX_OFF:
5183 tsync_tx_ctl_bit = 0;
5184 break;
5185 case HWTSTAMP_TX_ON:
5186 tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
5187 break;
5188 default:
5189 return -ERANGE;
5192 switch (config.rx_filter) {
5193 case HWTSTAMP_FILTER_NONE:
5194 tsync_rx_ctl_bit = 0;
5195 break;
5196 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
5197 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
5198 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
5199 case HWTSTAMP_FILTER_ALL:
5201 * register TSYNCRXCFG must be set, therefore it is not
5202 * possible to time stamp both Sync and Delay_Req messages
5203 * => fall back to time stamping all packets
5205 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_ALL;
5206 config.rx_filter = HWTSTAMP_FILTER_ALL;
5207 break;
5208 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
5209 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
5210 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
5211 is_l4 = 1;
5212 break;
5213 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
5214 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
5215 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
5216 is_l4 = 1;
5217 break;
5218 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
5219 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
5220 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
5221 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
5222 is_l2 = 1;
5223 is_l4 = 1;
5224 config.rx_filter = HWTSTAMP_FILTER_SOME;
5225 break;
5226 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
5227 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
5228 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
5229 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
5230 is_l2 = 1;
5231 is_l4 = 1;
5232 config.rx_filter = HWTSTAMP_FILTER_SOME;
5233 break;
5234 case HWTSTAMP_FILTER_PTP_V2_EVENT:
5235 case HWTSTAMP_FILTER_PTP_V2_SYNC:
5236 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
5237 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_EVENT_V2;
5238 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
5239 is_l2 = 1;
5240 break;
5241 default:
5242 return -ERANGE;
5245 /* enable/disable TX */
5246 regval = rd32(E1000_TSYNCTXCTL);
5247 regval = (regval & ~E1000_TSYNCTXCTL_ENABLED) | tsync_tx_ctl_bit;
5248 wr32(E1000_TSYNCTXCTL, regval);
5250 /* enable/disable RX, define which PTP packets are time stamped */
5251 regval = rd32(E1000_TSYNCRXCTL);
5252 regval = (regval & ~E1000_TSYNCRXCTL_ENABLED) | tsync_rx_ctl_bit;
5253 regval = (regval & ~0xE) | tsync_rx_ctl_type;
5254 wr32(E1000_TSYNCRXCTL, regval);
5255 wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
5258 * Ethertype Filter Queue Filter[0][15:0] = 0x88F7
5259 * (Ethertype to filter on)
5260 * Ethertype Filter Queue Filter[0][26] = 0x1 (Enable filter)
5261 * Ethertype Filter Queue Filter[0][30] = 0x1 (Enable Timestamping)
5263 wr32(E1000_ETQF0, is_l2 ? 0x440088f7 : 0);
5265 /* L4 Queue Filter[0]: only filter by source and destination port */
5266 wr32(E1000_SPQF0, htons(port));
5267 wr32(E1000_IMIREXT(0), is_l4 ?
5268 ((1<<12) | (1<<19) /* bypass size and control flags */) : 0);
5269 wr32(E1000_IMIR(0), is_l4 ?
5270 (htons(port)
5271 | (0<<16) /* immediate interrupt disabled */
5272 | 0 /* (1<<17) bit cleared: do not bypass
5273 destination port check */)
5274 : 0);
5275 wr32(E1000_FTQF0, is_l4 ?
5276 (0x11 /* UDP */
5277 | (1<<15) /* VF not compared */
5278 | (1<<27) /* Enable Timestamping */
5279 | (7<<28) /* only source port filter enabled,
5280 source/target address and protocol
5281 masked */)
5282 : ((1<<15) | (15<<28) /* all mask bits set = filter not
5283 enabled */));
5285 wrfl();
5287 adapter->hwtstamp_config = config;
5289 /* clear TX/RX time stamp registers, just to be sure */
5290 regval = rd32(E1000_TXSTMPH);
5291 regval = rd32(E1000_RXSTMPH);
5293 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
5294 -EFAULT : 0;
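/* Userspace drives this through SIOCSHWTSTAMP; a minimal sketch using
 * the structures from linux/net_tstamp.h:
 *
 *	struct hwtstamp_config cfg = { 0 };
 *	struct ifreq ifr;
 *
 *	cfg.tx_type   = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * On return cfg.rx_filter reports what was actually enabled, e.g.
 * HWTSTAMP_FILTER_ALL for the fall-back cases above.
 */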
5298 * igb_ioctl -
5299 * @netdev:
5300 * @ifreq:
5301 * @cmd:
5303 static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
5305 switch (cmd) {
5306 case SIOCGMIIPHY:
5307 case SIOCGMIIREG:
5308 case SIOCSMIIREG:
5309 return igb_mii_ioctl(netdev, ifr, cmd);
5310 case SIOCSHWTSTAMP:
5311 return igb_hwtstamp_ioctl(netdev, ifr, cmd);
5312 default:
5313 return -EOPNOTSUPP;
5317 s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
5319 struct igb_adapter *adapter = hw->back;
5320 u16 cap_offset;
5322 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
5323 if (!cap_offset)
5324 return -E1000_ERR_CONFIG;
5326 pci_read_config_word(adapter->pdev, cap_offset + reg, value);
5328 return 0;
5331 s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
5333 struct igb_adapter *adapter = hw->back;
5334 u16 cap_offset;
5336 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
5337 if (!cap_offset)
5338 return -E1000_ERR_CONFIG;
5340 pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
5342 return 0;
5345 static void igb_vlan_rx_register(struct net_device *netdev,
5346 struct vlan_group *grp)
5348 struct igb_adapter *adapter = netdev_priv(netdev);
5349 struct e1000_hw *hw = &adapter->hw;
5350 u32 ctrl, rctl;
5352 igb_irq_disable(adapter);
5353 adapter->vlgrp = grp;
5355 if (grp) {
5356 /* enable VLAN tag insert/strip */
5357 ctrl = rd32(E1000_CTRL);
5358 ctrl |= E1000_CTRL_VME;
5359 wr32(E1000_CTRL, ctrl);
5361 /* enable VLAN receive filtering */
5362 rctl = rd32(E1000_RCTL);
5363 rctl &= ~E1000_RCTL_CFIEN;
5364 wr32(E1000_RCTL, rctl);
5365 igb_update_mng_vlan(adapter);
5366 } else {
5367 /* disable VLAN tag insert/strip */
5368 ctrl = rd32(E1000_CTRL);
5369 ctrl &= ~E1000_CTRL_VME;
5370 wr32(E1000_CTRL, ctrl);
5372 if (adapter->mng_vlan_id != (u16)IGB_MNG_VLAN_NONE) {
5373 igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
5374 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
5378 igb_rlpml_set(adapter);
5380 if (!test_bit(__IGB_DOWN, &adapter->state))
5381 igb_irq_enable(adapter);
5384 static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
5386 struct igb_adapter *adapter = netdev_priv(netdev);
5387 struct e1000_hw *hw = &adapter->hw;
5388 int pf_id = adapter->vfs_allocated_count;
5390 if ((hw->mng_cookie.status &
5391 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
5392 (vid == adapter->mng_vlan_id))
5393 return;
5395 /* add vid to vlvf if sr-iov is enabled;
5396 * if that fails add directly to filter table */
5397 if (igb_vlvf_set(adapter, vid, true, pf_id))
5398 igb_vfta_set(hw, vid, true);
5402 static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
5404 struct igb_adapter *adapter = netdev_priv(netdev);
5405 struct e1000_hw *hw = &adapter->hw;
5406 int pf_id = adapter->vfs_allocated_count;
5408 igb_irq_disable(adapter);
5409 vlan_group_set_device(adapter->vlgrp, vid, NULL);
5411 if (!test_bit(__IGB_DOWN, &adapter->state))
5412 igb_irq_enable(adapter);
5414 if ((adapter->hw.mng_cookie.status &
5415 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
5416 (vid == adapter->mng_vlan_id)) {
5417 /* release control to f/w */
5418 igb_release_hw_control(adapter);
5419 return;
5422 /* remove vid from vlvf if sr-iov is enabled;
5423 * if not in vlvf remove from vfta */
5424 if (igb_vlvf_set(adapter, vid, false, pf_id))
5425 igb_vfta_set(hw, vid, false);
5428 static void igb_restore_vlan(struct igb_adapter *adapter)
5430 igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
5432 if (adapter->vlgrp) {
5433 u16 vid;
5434 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
5435 if (!vlan_group_get_device(adapter->vlgrp, vid))
5436 continue;
5437 igb_vlan_rx_add_vid(adapter->netdev, vid);
5442 int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
5444 struct e1000_mac_info *mac = &adapter->hw.mac;
5446 mac->autoneg = 0;
5448 switch (spddplx) {
5449 case SPEED_10 + DUPLEX_HALF:
5450 mac->forced_speed_duplex = ADVERTISE_10_HALF;
5451 break;
5452 case SPEED_10 + DUPLEX_FULL:
5453 mac->forced_speed_duplex = ADVERTISE_10_FULL;
5454 break;
5455 case SPEED_100 + DUPLEX_HALF:
5456 mac->forced_speed_duplex = ADVERTISE_100_HALF;
5457 break;
5458 case SPEED_100 + DUPLEX_FULL:
5459 mac->forced_speed_duplex = ADVERTISE_100_FULL;
5460 break;
5461 case SPEED_1000 + DUPLEX_FULL:
5462 mac->autoneg = 1;
5463 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
5464 break;
5465 case SPEED_1000 + DUPLEX_HALF: /* not supported */
5466 default:
5467 dev_err(&adapter->pdev->dev,
5468 "Unsupported Speed/Duplex configuration\n");
5469 return -EINVAL;
5471 return 0;
5474 static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
5476 struct net_device *netdev = pci_get_drvdata(pdev);
5477 struct igb_adapter *adapter = netdev_priv(netdev);
5478 struct e1000_hw *hw = &adapter->hw;
5479 u32 ctrl, rctl, status;
5480 u32 wufc = adapter->wol;
5481 #ifdef CONFIG_PM
5482 int retval = 0;
5483 #endif
5485 netif_device_detach(netdev);
5487 if (netif_running(netdev))
5488 igb_close(netdev);
5490 igb_clear_interrupt_scheme(adapter);
5492 #ifdef CONFIG_PM
5493 retval = pci_save_state(pdev);
5494 if (retval)
5495 return retval;
5496 #endif
5498 status = rd32(E1000_STATUS);
5499 if (status & E1000_STATUS_LU)
5500 wufc &= ~E1000_WUFC_LNKC;
5502 if (wufc) {
5503 igb_setup_rctl(adapter);
5504 igb_set_rx_mode(netdev);
5506 /* turn on all-multi mode if wake on multicast is enabled */
5507 if (wufc & E1000_WUFC_MC) {
5508 rctl = rd32(E1000_RCTL);
5509 rctl |= E1000_RCTL_MPE;
5510 wr32(E1000_RCTL, rctl);
5513 ctrl = rd32(E1000_CTRL);
5514 /* advertise wake from D3Cold */
5515 #define E1000_CTRL_ADVD3WUC 0x00100000
5516 /* phy power management enable */
5517 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5518 ctrl |= E1000_CTRL_ADVD3WUC;
5519 wr32(E1000_CTRL, ctrl);
5521 /* Allow time for pending master requests to run */
5522 igb_disable_pcie_master(&adapter->hw);
5524 wr32(E1000_WUC, E1000_WUC_PME_EN);
5525 wr32(E1000_WUFC, wufc);
5526 } else {
5527 wr32(E1000_WUC, 0);
5528 wr32(E1000_WUFC, 0);
5531 *enable_wake = wufc || adapter->en_mng_pt;
5532 if (!*enable_wake)
5533 igb_shutdown_serdes_link_82575(hw);
5535 /* Release control of h/w to f/w. If f/w is AMT enabled, this
5536 * would have already happened in close and is redundant. */
5537 igb_release_hw_control(adapter);
5539 pci_disable_device(pdev);
5541 return 0;
5544 #ifdef CONFIG_PM
5545 static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
5547 int retval;
5548 bool wake;
5550 retval = __igb_shutdown(pdev, &wake);
5551 if (retval)
5552 return retval;
5554 if (wake) {
5555 pci_prepare_to_sleep(pdev);
5556 } else {
5557 pci_wake_from_d3(pdev, false);
5558 pci_set_power_state(pdev, PCI_D3hot);
5561 return 0;
5564 static int igb_resume(struct pci_dev *pdev)
5566 struct net_device *netdev = pci_get_drvdata(pdev);
5567 struct igb_adapter *adapter = netdev_priv(netdev);
5568 struct e1000_hw *hw = &adapter->hw;
5569 u32 err;
5571 pci_set_power_state(pdev, PCI_D0);
5572 pci_restore_state(pdev);
5574 err = pci_enable_device_mem(pdev);
5575 if (err) {
5576 dev_err(&pdev->dev,
5577 "igb: Cannot enable PCI device from suspend\n");
5578 return err;
5580 pci_set_master(pdev);
5582 pci_enable_wake(pdev, PCI_D3hot, 0);
5583 pci_enable_wake(pdev, PCI_D3cold, 0);
5585 if (igb_init_interrupt_scheme(adapter)) {
5586 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
5587 return -ENOMEM;
5590 /* e1000_power_up_phy(adapter); */
5592 igb_reset(adapter);
5594 /* let the f/w know that the h/w is now under the control of the
5595 * driver. */
5596 igb_get_hw_control(adapter);
5598 wr32(E1000_WUS, ~0);
5600 if (netif_running(netdev)) {
5601 err = igb_open(netdev);
5602 if (err)
5603 return err;
5606 netif_device_attach(netdev);
5608 return 0;
5610 #endif
5612 static void igb_shutdown(struct pci_dev *pdev)
5614 bool wake;
5616 __igb_shutdown(pdev, &wake);
5618 if (system_state == SYSTEM_POWER_OFF) {
5619 pci_wake_from_d3(pdev, wake);
5620 pci_set_power_state(pdev, PCI_D3hot);
5624 #ifdef CONFIG_NET_POLL_CONTROLLER
5626 * Polling 'interrupt' - used by things like netconsole to send skbs
5627 * without having to re-enable interrupts. It's not called while
5628 * the interrupt routine is executing.
5630 static void igb_netpoll(struct net_device *netdev)
5632 struct igb_adapter *adapter = netdev_priv(netdev);
5633 struct e1000_hw *hw = &adapter->hw;
5634 int i;
5636 if (!adapter->msix_entries) {
5637 struct igb_q_vector *q_vector = adapter->q_vector[0];
5638 igb_irq_disable(adapter);
5639 napi_schedule(&q_vector->napi);
5640 return;
5643 for (i = 0; i < adapter->num_q_vectors; i++) {
5644 struct igb_q_vector *q_vector = adapter->q_vector[i];
5645 wr32(E1000_EIMC, q_vector->eims_value);
5646 napi_schedule(&q_vector->napi);
5649 #endif /* CONFIG_NET_POLL_CONTROLLER */
5652 * igb_io_error_detected - called when PCI error is detected
5653 * @pdev: Pointer to PCI device
5654 * @state: The current pci connection state
5656 * This function is called after a PCI bus error affecting
5657 * this device has been detected.
5659 static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
5660 pci_channel_state_t state)
5662 struct net_device *netdev = pci_get_drvdata(pdev);
5663 struct igb_adapter *adapter = netdev_priv(netdev);
5665 netif_device_detach(netdev);
5667 if (state == pci_channel_io_perm_failure)
5668 return PCI_ERS_RESULT_DISCONNECT;
5670 if (netif_running(netdev))
5671 igb_down(adapter);
5672 pci_disable_device(pdev);
5674 /* Request a slot reset. */
5675 return PCI_ERS_RESULT_NEED_RESET;
5679 * igb_io_slot_reset - called after the pci bus has been reset.
5680 * @pdev: Pointer to PCI device
5682 * Restart the card from scratch, as if from a cold-boot. Implementation
5683 * resembles the first-half of the igb_resume routine.
5685 static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
5687 struct net_device *netdev = pci_get_drvdata(pdev);
5688 struct igb_adapter *adapter = netdev_priv(netdev);
5689 struct e1000_hw *hw = &adapter->hw;
5690 pci_ers_result_t result;
5691 int err;
5693 if (pci_enable_device_mem(pdev)) {
5694 dev_err(&pdev->dev,
5695 "Cannot re-enable PCI device after reset.\n");
5696 result = PCI_ERS_RESULT_DISCONNECT;
5697 } else {
5698 pci_set_master(pdev);
5699 pci_restore_state(pdev);
5701 pci_enable_wake(pdev, PCI_D3hot, 0);
5702 pci_enable_wake(pdev, PCI_D3cold, 0);
5704 igb_reset(adapter);
5705 wr32(E1000_WUS, ~0);
5706 result = PCI_ERS_RESULT_RECOVERED;
5709 err = pci_cleanup_aer_uncorrect_error_status(pdev);
5710 if (err) {
5711 dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
5712 "failed 0x%0x\n", err);
5713 /* non-fatal, continue */
5716 return result;
5720 * igb_io_resume - called when traffic can start flowing again.
5721 * @pdev: Pointer to PCI device
5723 * This callback is called when the error recovery driver tells us that
5724 * its OK to resume normal operation. Implementation resembles the
5725 * second-half of the igb_resume routine.
5727 static void igb_io_resume(struct pci_dev *pdev)
5729 struct net_device *netdev = pci_get_drvdata(pdev);
5730 struct igb_adapter *adapter = netdev_priv(netdev);
5732 if (netif_running(netdev)) {
5733 if (igb_up(adapter)) {
5734 dev_err(&pdev->dev, "igb_up failed after reset\n");
5735 return;
5739 netif_device_attach(netdev);
5741 /* let the f/w know that the h/w is now under the control of the
5742 * driver. */
5743 igb_get_hw_control(adapter);
5746 static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
5747 u8 qsel)
5749 u32 rar_low, rar_high;
5750 struct e1000_hw *hw = &adapter->hw;
5752 /* HW expects these in little endian so we reverse the byte order
5753 * from network order (big endian) to little endian
5755 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
5756 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
5757 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
5759 /* Indicate to hardware the Address is Valid. */
5760 rar_high |= E1000_RAH_AV;
5762 if (hw->mac.type == e1000_82575)
5763 rar_high |= E1000_RAH_POOL_1 * qsel;
5764 else
5765 rar_high |= E1000_RAH_POOL_1 << qsel;
5767 wr32(E1000_RAL(index), rar_low);
5768 wrfl();
5769 wr32(E1000_RAH(index), rar_high);
5770 wrfl();
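/* Worked example: for addr = 00:1b:21:aa:bb:cc the packing above gives
 * rar_low = 0xaa211b00 and rar_high = 0x0000ccbb, before the valid and
 * pool bits are OR'd in.
 */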
5773 static int igb_set_vf_mac(struct igb_adapter *adapter,
5774 int vf, unsigned char *mac_addr)
5776 struct e1000_hw *hw = &adapter->hw;
5777 /* VF MAC addresses start at the end of the receive addresses and move
5778 * towards the first; as a result a collision should not be possible */
5779 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
5781 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
5783 igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
5785 return 0;
5788 static void igb_vmm_control(struct igb_adapter *adapter)
5790 struct e1000_hw *hw = &adapter->hw;
5791 u32 reg_data;
5793 if (!adapter->vfs_allocated_count)
5794 return;
5796 /* VFs need a PF reset indication before they
5797 * can send/receive mail */
5798 reg_data = rd32(E1000_CTRL_EXT);
5799 reg_data |= E1000_CTRL_EXT_PFRSTD;
5800 wr32(E1000_CTRL_EXT, reg_data);
5802 igb_vmdq_set_loopback_pf(hw, true);
5803 igb_vmdq_set_replication_pf(hw, true);
5806 /* igb_main.c */