ixgbe: link change interrupt was not causing link event
/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2007 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "ixgbe.h"
#include "ixgbe_common.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Network Driver";

#define DRV_VERSION "1.3.18-k4"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
	"Copyright (c) 1999-2007 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598] = &ixgbe_82598_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbe_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
	 board_82598 },

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
			    void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next          = NULL,
	.priority      = 0
};
#endif

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

#ifdef DEBUG
/**
 * ixgbe_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;
	struct net_device *netdev = adapter->netdev;
	return netdev->name;
}
#endif

static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
			   u8 msix_vector)
{
	u32 ivar, index;

	msix_vector |= IXGBE_IVAR_ALLOC_VAL;
	index = (int_alloc_entry >> 2) & 0x1F;
	ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR(index));
	ivar &= ~(0xFF << (8 * (int_alloc_entry & 0x3)));
	ivar |= (msix_vector << (8 * (int_alloc_entry & 0x3)));
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
}
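/*
 * Note on the IVAR math above: each 32-bit IVAR register packs four 8-bit
 * allocation entries, so (entry >> 2) selects the register and (entry & 0x3)
 * selects the byte lane within it.  For example, allocation entry 5 lands in
 * IVAR(1) bits 15:8, with IXGBE_IVAR_ALLOC_VAL marking the entry valid.
 */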
static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
					     struct ixgbe_tx_buffer
					     *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		pci_unmap_page(adapter->pdev, tx_buffer_info->dma,
			       tx_buffer_info->length, PCI_DMA_TODEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	/* tx_buffer_info must be completely set up in the transmit path */
}

static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
				       struct ixgbe_ring *tx_ring,
				       unsigned int eop)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 head, tail;

	/* Detect a transmit hang in hardware, this serializes the
	 * check with the clearing of time_stamp and movement of eop */
	head = IXGBE_READ_REG(hw, tx_ring->head);
	tail = IXGBE_READ_REG(hw, tx_ring->tail);
	adapter->detect_tx_hung = false;
	if ((head != tail) &&
	    tx_ring->tx_buffer_info[eop].time_stamp &&
	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
	    !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
		/* detected Tx unit hang */
		union ixgbe_adv_tx_desc *tx_desc;
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
		DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
			"  Tx Queue      <%d>\n"
			"  TDH, TDT      <%x>, <%x>\n"
			"  next_to_use   <%x>\n"
			"  next_to_clean <%x>\n"
			"tx_buffer_info[next_to_clean]\n"
			"  time_stamp    <%lx>\n"
			"  jiffies       <%lx>\n",
			tx_ring->queue_index,
			head, tail,
			tx_ring->next_to_use, eop,
			tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
		return true;
	}

	return false;
}

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
			  (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
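/*
 * Worst-case descriptor arithmetic: TXD_USE_COUNT(S) rounds S up in
 * IXGBE_MAX_DATA_PER_TXD (16K) units, so DESC_NEEDED budgets one descriptor
 * for skb->data, one per page of each of the MAX_SKB_FRAGS fragments, plus
 * one more for the context descriptor.
 */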
#define GET_TX_HEAD_FROM_RING(ring) (\
	*(volatile u32 *) \
	((union ixgbe_adv_tx_desc *)(ring)->desc + (ring)->count))
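/*
 * Head write-back: the hardware DMAs its head pointer into the word just
 * past the last descriptor (see the TDWBAL/TDWBAH setup in
 * ixgbe_configure_tx), so this macro lets the clean-up path read the head
 * from cacheable memory instead of issuing an MMIO read of TDH.
 */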
static void ixgbe_tx_timeout(struct net_device *netdev);

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
			       struct ixgbe_ring *tx_ring)
{
	union ixgbe_adv_tx_desc *tx_desc;
	struct ixgbe_tx_buffer *tx_buffer_info;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;
	unsigned int i;
	u32 head, oldhead;
	unsigned int count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	rmb();
	head = GET_TX_HEAD_FROM_RING(tx_ring);
	head = le32_to_cpu(head);
	i = tx_ring->next_to_clean;
	while (1) {
		while (i != head) {
			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			skb = tx_buffer_info->skb;

			if (skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbe_unmap_and_free_tx_resource(adapter,
							 tx_buffer_info);

			i++;
			if (i == tx_ring->count)
				i = 0;

			count++;
			if (count == tx_ring->count)
				goto done_cleaning;
		}
		oldhead = head;
		rmb();
		head = GET_TX_HEAD_FROM_RING(tx_ring);
		head = le32_to_cpu(head);
		if (head == oldhead)
			goto done_cleaning;
	} /* while (1) */

done_cleaning:
	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(netdev) &&
		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
			/* schedule immediate reset if we believe we hung */
			DPRINTK(PROBE, INFO,
				"tx hang %d detected, resetting adapter\n",
				adapter->tx_timeout_count + 1);
			ixgbe_tx_timeout(adapter->netdev);
		}
	}

	/* re-arm the interrupt */
	if ((total_packets >= tx_ring->work_limit) ||
	    (count == tx_ring->count))
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->v_idx);

	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	adapter->net_stats.tx_bytes += total_bytes;
	adapter->net_stats.tx_packets += total_packets;
	return (total_packets ? true : false);
}

#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *rx_ring)
{
	u32 rxctrl;
	int cpu = get_cpu();
	int q = rx_ring - adapter->rx_ring;

	if (rx_ring->cpu != cpu) {
		rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
		rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
		rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
		rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
		rx_ring->cpu = cpu;
	}
	put_cpu();
}

static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *tx_ring)
{
	u32 txctrl;
	int cpu = get_cpu();
	int q = tx_ring - adapter->tx_ring;

	if (tx_ring->cpu != cpu) {
		txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
		txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
		txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
		tx_ring->cpu = cpu;
	}
	put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
	int i;

	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
		return;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].cpu = -1;
		ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].cpu = -1;
		ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
	}
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if we're already enabled, don't do it again */
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			break;
		/* Always use CB2 mode, difference is masked
		 * in the CB driver. */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
			ixgbe_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
		}
		break;
	}

	return 0;
}

#endif /* CONFIG_DCA or CONFIG_DCA_MODULE */

/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @adapter: board private structure
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
			      struct sk_buff *skb, u8 status,
			      struct ixgbe_ring *ring,
			      union ixgbe_adv_rx_desc *rx_desc)
{
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	if (adapter->netdev->features & NETIF_F_LRO &&
	    skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (adapter->vlgrp && is_vlan)
			lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb,
						     adapter->vlgrp, tag,
						     rx_desc);
		else
			lro_receive_skb(&ring->lro_mgr, skb, rx_desc);
		ring->lro_used = true;
	} else {
		if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
			if (adapter->vlgrp && is_vlan)
				vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag);
			else
				netif_receive_skb(skb);
		} else {
			if (adapter->vlgrp && is_vlan)
				vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
			else
				netif_rx(skb);
		}
	}
}

/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
				     u32 status_err, struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_NONE;

	/* Rx csum disabled */
	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_rx_good++;
}
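/*
 * Net effect of the checks above: the skb stays at CHECKSUM_NONE (forcing a
 * software verify) unless Rx checksum offload is enabled, no IP header
 * checksum error was flagged, and an L4 checksum was both computed
 * (STAT_L4CS) and error free (no ERR_TCPE).
 */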
/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
				   struct ixgbe_ring *rx_ring,
				   int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	unsigned int i;
	unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

		if (!bi->page &&
		    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
			bi->page = alloc_page(GFP_ATOMIC);
			if (!bi->page) {
				adapter->alloc_rx_page_failed++;
				goto no_buffers;
			}
			bi->page_dma = pci_map_page(pdev, bi->page, 0,
						    PAGE_SIZE,
						    PCI_DMA_FROMDEVICE);
		}

		if (!bi->skb) {
			struct sk_buff *skb = netdev_alloc_skb(netdev, bufsz);

			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			/*
			 * Make buffer alignment 2 beyond a 16 byte boundary
			 * this will result in a 16 byte aligned IP header after
			 * the 14 byte MAC header is removed
			 */
			skb_reserve(skb, NET_IP_ALIGN);

			bi->skb = skb;
			bi->dma = pci_map_single(pdev, skb->data, bufsz,
						 PCI_DMA_FROMDEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i-- == 0)
			i = (rx_ring->count - 1);

		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}

static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
}

static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}

static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
			       struct ixgbe_ring *rx_ring,
			       int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 len, staterr;
	u16 hdr_info;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		u32 upper_len = 0;
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			      IXGBE_RXDADV_HDRBUFLEN_SHIFT;
			if (hdr_info & IXGBE_RXDADV_SPH)
				adapter->rx_hdr_split++;
			if (len > IXGBE_RX_HDR_SIZE)
				len = IXGBE_RX_HDR_SIZE;
			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
		} else {
			len = le16_to_cpu(rx_desc->wb.upper.length);
		}

		cleaned = true;
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (len && !skb_shinfo(skb)->nr_frags) {
			pci_unmap_single(pdev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len + NET_IP_ALIGN,
					 PCI_DMA_FROMDEVICE);
			skb_put(skb, len);
		}

		if (upper_len) {
			pci_unmap_page(pdev, rx_buffer_info->page_dma,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
			rx_buffer_info->page_dma = 0;
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buffer_info->page, 0, upper_len);
			rx_buffer_info->page = NULL;

			skb->len += upper_len;
			skb->data_len += upper_len;
			skb->truesize += upper_len;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_buffer = &rx_ring->rx_buffer_info[i];

		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);

		cleaned_count++;
		if (staterr & IXGBE_RXD_STAT_EOP) {
			rx_ring->stats.packets++;
			rx_ring->stats.bytes += skb->len;
		} else {
			rx_buffer_info->skb = next_buffer->skb;
			rx_buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			adapter->non_eop_descs++;
			goto next_desc;
		}

		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbe_rx_checksum(adapter, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, netdev);
		ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc);
		netdev->last_rx = jiffies;

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	if (rx_ring->lro_used) {
		lro_flush_all(&rx_ring->lro_mgr);
		rx_ring->lro_used = false;
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;

	return cleaned;
}

static int ixgbe_clean_rxonly(struct napi_struct *, int);
/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
	struct ixgbe_q_vector *q_vector;
	int i, j, q_vectors, v_idx, r_idx;
	u32 mask;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = &adapter->q_vector[v_idx];
		/* XXX for_each_bit(...) */
		r_idx = find_first_bit(q_vector->rxr_idx,
				       adapter->num_rx_queues);

		for (i = 0; i < q_vector->rxr_count; i++) {
			j = adapter->rx_ring[r_idx].reg_idx;
			ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx);
			r_idx = find_next_bit(q_vector->rxr_idx,
					      adapter->num_rx_queues,
					      r_idx + 1);
		}
		r_idx = find_first_bit(q_vector->txr_idx,
				       adapter->num_tx_queues);

		for (i = 0; i < q_vector->txr_count; i++) {
			j = adapter->tx_ring[r_idx].reg_idx;
			ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx);
			r_idx = find_next_bit(q_vector->txr_idx,
					      adapter->num_tx_queues,
					      r_idx + 1);
		}

		/* if this is a tx only vector use half the irq (tx) rate */
		if (q_vector->txr_count && !q_vector->rxr_count)
			q_vector->eitr = adapter->tx_eitr;
		else
			/* rx only or mixed */
			q_vector->eitr = adapter->rx_eitr;

		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx),
				EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
	}

	ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);

	/* set up to autoclear timer, and the vectors */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}
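/*
 * Note: the EIAC mask written above auto-clears the queue interrupt causes
 * when their MSI-X vectors fire, while OTHER/LSC are deliberately excluded
 * so those causes stay latched until their handler deals with them (see
 * ixgbe_msix_lsc and ixgbe_check_lsc).
 */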
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @eitr: eitr setting (ints per sec) to give last timeslice
 * @itr_setting: current throttle rate in ints/second
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * this functionality is controlled by the InterruptThrottleRate module
 * parameter (see ixgbe_param.c)
 **/
static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
			   u32 eitr, u8 itr_setting,
			   int packets, int bytes)
{
	unsigned int retval = itr_setting;
	u32 timepassed_us;
	u64 bytes_perint;

	if (packets == 0)
		goto update_itr_done;

	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = 1000000/eitr;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > adapter->eitr_low)
			retval = low_latency;
		break;
	case low_latency:
		if (bytes_perint > adapter->eitr_high)
			retval = bulk_latency;
		else if (bytes_perint <= adapter->eitr_low)
			retval = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= adapter->eitr_high)
			retval = low_latency;
		break;
	}

update_itr_done:
	return retval;
}
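/*
 * Worked example: at eitr = 20000 ints/s the last timeslice was
 * 1000000/20000 = 50 usec, so 250000 bytes in that slice gives bytes_perint
 * = 5000 bytes/usec; that rate is compared against eitr_low/eitr_high to
 * step the latency class at most one level per interrupt.
 */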
static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 new_itr;
	u8 current_itr, ret_itr;
	int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) /
			      sizeof(struct ixgbe_q_vector);
	struct ixgbe_ring *rx_ring, *tx_ring;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
					   q_vector->tx_eitr,
					   tx_ring->total_packets,
					   tx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->tx_eitr = ((q_vector->tx_eitr > ret_itr) ?
				     q_vector->tx_eitr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
					   q_vector->rx_eitr,
					   rx_ring->total_packets,
					   rx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->rx_eitr = ((q_vector->rx_eitr > ret_itr) ?
				     q_vector->rx_eitr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

	current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
	default:
		new_itr = 8000;
		break;
	}

	if (new_itr != q_vector->eitr) {
		u32 itr_reg;
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
		q_vector->eitr = new_itr;
		itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
		/* must write high and low 16 bits to reset counter */
		DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx,
			itr_reg);
		IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg | (itr_reg)<<16);
	}

	return;
}
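/*
 * The smoothing above computes new = 0.9 * old + 0.1 * target in integer
 * math, e.g. an eitr of 8000 ints/s with a 20000 ints/s target becomes
 * 7200 + 2000 = 9200 on this pass, converging on the target over several
 * interrupts instead of jumping.
 */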
static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->lsc_int++;
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		schedule_work(&adapter->watchdog_task);
	}
}
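/*
 * Note: this is the path the patch title refers to. On a link change the
 * LSC cause is masked and the watchdog task is scheduled immediately (with
 * NEED_LINK_UPDATE set) rather than waiting for the next timer tick, so the
 * link change interrupt actually produces a prompt link event.
 */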
static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	if (eicr & IXGBE_EICR_LSC)
		ixgbe_check_lsc(adapter);

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);

	return IRQ_HANDLED;
}

static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *tx_ring;
	int i, r_idx;

	if (!q_vector->txr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			ixgbe_update_tx_dca(adapter, tx_ring);
#endif
		tx_ring->total_bytes = 0;
		tx_ring->total_packets = 0;
		ixgbe_clean_tx_irq(adapter, tx_ring);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	return IRQ_HANDLED;
}

/**
 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *rx_ring;
	int r_idx;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	if (!q_vector->rxr_count)
		return IRQ_HANDLED;

	rx_ring = &(adapter->rx_ring[r_idx]);
	/* disable interrupts on this vector only */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
	rx_ring->total_bytes = 0;
	rx_ring->total_packets = 0;
	netif_rx_schedule(adapter->netdev, &q_vector->napi);

	return IRQ_HANDLED;
}

static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
{
	ixgbe_msix_clean_rx(irq, data);
	ixgbe_msix_clean_tx(irq, data);

	return IRQ_HANDLED;
}

/**
 * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 **/
static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
			       container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *rx_ring;
	int work_done = 0;
	long r_idx;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);
#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		ixgbe_update_rx_dca(adapter, rx_ring);
#endif

	ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);

	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		netif_rx_complete(adapter->netdev, napi);
		if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
			ixgbe_set_itr_msix(q_vector);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rx_ring->v_idx);
	}

	return work_done;
}

static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
				     int r_idx)
{
	a->q_vector[v_idx].adapter = a;
	set_bit(r_idx, a->q_vector[v_idx].rxr_idx);
	a->q_vector[v_idx].rxr_count++;
	a->rx_ring[r_idx].v_idx = 1 << v_idx;
}

static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
				     int r_idx)
{
	a->q_vector[v_idx].adapter = a;
	set_bit(r_idx, a->q_vector[v_idx].txr_idx);
	a->q_vector[v_idx].txr_count++;
	a->tx_ring[r_idx].v_idx = 1 << v_idx;
}
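/*
 * v_idx is stored one-hot (1 << vector index) so a ring's vector can be
 * written directly into bitmask-style registers such as EICS, EIMS and
 * EIMC; see the re-arm in ixgbe_clean_tx_irq and the per-vector disable in
 * ixgbe_msix_clean_rx.
 */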
/**
 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 * @vectors: allotted vector count for descriptor rings
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
				      int vectors)
{
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	/* No mapping required if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		goto out;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);

		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}
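/*
 * Example of the grouped case: 6 Rx queues on 4 vectors yields DIV_ROUND_UP
 * splits of 2, 2, 1, 1 as rxr_remaining shrinks, so the earlier vectors
 * absorb the remainder and every queue is still mapped exactly once.
 */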
/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irqreturn_t (*handler)(int, void *);
	int i, vector, q_vectors, err;

	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* Map the Tx/Rx rings to the vectors we were allotted. */
	err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
	if (err)
		goto out;

#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
			 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
			 &ixgbe_msix_clean_many)
	for (vector = 0; vector < q_vectors; vector++) {
		handler = SET_HANDLER(&adapter->q_vector[vector]);
		sprintf(adapter->name[vector], "%s:v%d-%s",
			netdev->name, vector,
			(handler == &ixgbe_msix_clean_rx) ? "Rx" :
			((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx"));
		err = request_irq(adapter->msix_entries[vector].vector,
				  handler, 0, adapter->name[vector],
				  &(adapter->q_vector[vector]));
		if (err) {
			DPRINTK(PROBE, ERR,
				"request_irq failed for MSIX interrupt "
				"Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	sprintf(adapter->name[vector], "%s:lsc", netdev->name);
	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
	if (err) {
		DPRINTK(PROBE, ERR,
			"request_irq for msix_lsc failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	for (i = vector - 1; i >= 0; i--)
		free_irq(adapter->msix_entries[--vector].vector,
			 &(adapter->q_vector[i]));
	adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
out:
	return err;
}

static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_q_vector *q_vector = adapter->q_vector;
	u8 current_itr;
	u32 new_itr = q_vector->eitr;
	struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
	struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];

	q_vector->tx_eitr = ixgbe_update_itr(adapter, new_itr,
					     q_vector->tx_eitr,
					     tx_ring->total_packets,
					     tx_ring->total_bytes);
	q_vector->rx_eitr = ixgbe_update_itr(adapter, new_itr,
					     q_vector->rx_eitr,
					     rx_ring->total_packets,
					     rx_ring->total_bytes);

	current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 8000;
		break;
	default:
		break;
	}

	if (new_itr != q_vector->eitr) {
		u32 itr_reg;
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
		q_vector->eitr = new_itr;
		itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
		/* must write high and low 16 bits to reset counter */
		IXGBE_WRITE_REG(hw, IXGBE_EITR(0), itr_reg | (itr_reg)<<16);
	}

	return;
}

static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter);

/**
 * ixgbe_intr - legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t ixgbe_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr;

	/* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
	 * therefore no explicit interrupt disable is necessary */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
	if (!eicr)
		return IRQ_NONE; /* Not our interrupt */

	if (eicr & IXGBE_EICR_LSC)
		ixgbe_check_lsc(adapter);

	if (netif_rx_schedule_prep(netdev, &adapter->q_vector[0].napi)) {
		adapter->tx_ring[0].total_packets = 0;
		adapter->tx_ring[0].total_bytes = 0;
		adapter->rx_ring[0].total_packets = 0;
		adapter->rx_ring[0].total_bytes = 0;
		/* would disable interrupts here but EIAM disabled it */
		__netif_rx_schedule(netdev, &adapter->q_vector[0].napi);
	}

	return IRQ_HANDLED;
}

static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
		bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
		bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
		q_vector->rxr_count = 0;
		q_vector->txr_count = 0;
	}
}

/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		err = ixgbe_request_msix_irqs(adapter);
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
				  netdev->name, netdev);
	} else {
		err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
				  netdev->name, netdev);
	}

	if (err)
		DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);

	return err;
}

static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int i, q_vectors;

		q_vectors = adapter->num_msix_vectors;

		i = q_vectors - 1;
		free_irq(adapter->msix_entries[i].vector, netdev);

		i--;
		for (; i >= 0; i--) {
			free_irq(adapter->msix_entries[i].vector,
				 &(adapter->q_vector[i]));
		}

		ixgbe_reset_q_vectors(adapter);
	} else {
		free_irq(adapter->pdev->irq, netdev);
	}
}

/**
 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int i;
		for (i = 0; i < adapter->num_msix_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

/**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
{
	u32 mask;
	mask = IXGBE_EIMS_ENABLE_MASK;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
	IXGBE_WRITE_FLUSH(&adapter->hw);
}

/**
 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
 *
 **/
static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
			EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr));

	ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0);
	ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(0), 0);

	map_vector_to_rxq(adapter, 0, 0);
	map_vector_to_txq(adapter, 0, 0);

	DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n");
}

/**
 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
{
	u64 tdba, tdwba;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, j, tdlen, txctrl;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *ring = &adapter->tx_ring[i];
		j = ring->reg_idx;
		tdba = ring->dma;
		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
				(tdba & DMA_32BIT_MASK));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		tdwba = ring->dma +
			(ring->count * sizeof(union ixgbe_adv_tx_desc));
		tdwba |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(j), tdwba & DMA_32BIT_MASK);
		IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(j), (tdwba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
		adapter->tx_ring[i].head = IXGBE_TDH(j);
		adapter->tx_ring[i].tail = IXGBE_TDT(j);
		/* Disable Tx Head Writeback RO bit, since this hoses
		 * bookkeeping if things aren't delivered in order.
		 */
		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
	}
}
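/*
 * The TDWBAL/TDWBAH programming above points head write-back at the word
 * immediately after each ring's descriptors, the same location read by
 * GET_TX_HEAD_FROM_RING(); clearing the relaxed-ordering bit keeps those
 * write-backs in order so clean-up never sees a stale head value.
 */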
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
{
	struct ixgbe_ring *rx_ring;
	u32 srrctl;
	int queue0;
	unsigned long mask;

	/* program one srrctl register per VMDq index */
	if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
		long shift, len;
		mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask;
		len = sizeof(adapter->ring_feature[RING_F_VMDQ].mask) * 8;
		shift = find_first_bit(&mask, len);
		queue0 = index & mask;
		index = (index & mask) >> shift;
	/* program one srrctl per RSS queue since RDRXCTL.MVMEN is enabled */
	} else {
		mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask;
		queue0 = index & mask;
		index = index & mask;
	}

	rx_ring = &adapter->rx_ring[queue0];

	srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));

	srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
	srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;

	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		srrctl |= IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
		srrctl |= ((IXGBE_RX_HDR_SIZE <<
			    IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
			   IXGBE_SRRCTL_BSIZEHDR_MASK);
	} else {
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
			srrctl |= IXGBE_RXBUFFER_2048 >>
				  IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		else
			srrctl |= rx_ring->rx_buf_len >>
				  IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	}
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
}

/**
 * ixgbe_get_skb_hdr - helper function for LRO header processing
 * @skb: pointer to sk_buff to be added to LRO packet
 * @iphdr: pointer to ip header structure
 * @tcph: pointer to tcp header structure
 * @hdr_flags: pointer to header flags
 * @priv: private data
 **/
static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
			     u64 *hdr_flags, void *priv)
{
	union ixgbe_adv_rx_desc *rx_desc = priv;

	/* Verify that this is a valid IPv4 TCP packet */
	if (!((ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_IPV4) &&
	      (ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_TCP)))
		return -1;

	/* Set network headers */
	skb_reset_network_header(skb);
	skb_set_transport_header(skb, ip_hdrlen(skb));
	*iphdr = ip_hdr(skb);
	*tcph = tcp_hdr(skb);
	*hdr_flags = LRO_IPV4 | LRO_TCP;
	return 0;
}

#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
			   (((S) & (PAGE_SIZE - 1)) ? 1 : 0))

/**
 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
{
	u64 rdba;
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i, j;
	u32 rdlen, rxctrl, rxcsum;
	static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
			  0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
			  0x6A3E67EA, 0x14364D17, 0x3BED200D};
	u32 fctrl, hlreg0;
	u32 pages;
	u32 reta = 0, mrqc;
	u32 rdrxctl;
	int rx_buf_len;

	/* Decide whether to use packet split mode or not */
	if (netdev->mtu > ETH_DATA_LEN)
		adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
	else
		adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;

	/* Set the RX buffer length according to the mode */
	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		rx_buf_len = IXGBE_RX_HDR_SIZE;
	} else {
		if (netdev->mtu <= ETH_DATA_LEN)
			rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		else
			rx_buf_len = ALIGN(max_frame, 1024);
	}

	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
	else
		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);

	pages = PAGE_USE_COUNT(adapter->netdev->mtu);

	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
	/* disable receives while setting up the descriptors */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rdba = adapter->rx_ring[i].dma;
		j = adapter->rx_ring[i].reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_32BIT_MASK));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
		adapter->rx_ring[i].head = IXGBE_RDH(j);
		adapter->rx_ring[i].tail = IXGBE_RDT(j);
		adapter->rx_ring[i].rx_buf_len = rx_buf_len;
		/* Initial LRO Settings */
		adapter->rx_ring[i].lro_mgr.max_aggr = IXGBE_MAX_LRO_AGGREGATE;
		adapter->rx_ring[i].lro_mgr.max_desc = IXGBE_MAX_LRO_DESCRIPTORS;
		adapter->rx_ring[i].lro_mgr.get_skb_header = ixgbe_get_skb_hdr;
		adapter->rx_ring[i].lro_mgr.features = LRO_F_EXTRACT_VLAN_ID;
		if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
			adapter->rx_ring[i].lro_mgr.features |= LRO_F_NAPI;
		adapter->rx_ring[i].lro_mgr.dev = adapter->netdev;
		adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
		adapter->rx_ring[i].lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;

		ixgbe_configure_srrctl(adapter, j);
	}

	/*
	 * For VMDq support of different descriptor types or
	 * buffer sizes through the use of multiple SRRCTL
	 * registers, RDRXCTL.MVMEN must be set to 1
	 *
	 * also, the manual doesn't mention it clearly but DCA hints
	 * will only use queue 0's tags unless this bit is set.  Side
	 * effects of setting this bit are only that SRRCTL must be
	 * fully programmed [0..15]
	 */
	rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
	rdrxctl |= IXGBE_RDRXCTL_MVMEN;
	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
		/* Fill out redirection table */
		for (i = 0, j = 0; i < 128; i++, j++) {
			if (j == adapter->ring_feature[RING_F_RSS].indices)
				j = 0;
			/* reta = 4-byte sliding window of
			 * 0x00..(indices-1)(indices-1)00..etc. */
			reta = (reta << 8) | (j * 0x11);
			if ((i & 3) == 3)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
		}

		/* Fill out hash function seeds */
		for (i = 0; i < 10; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);

		mrqc = IXGBE_MRQC_RSSEN
		    /* Perform hash on these packet types */
		       | IXGBE_MRQC_RSS_FIELD_IPV4
		       | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
		       | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
		       | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
		       | IXGBE_MRQC_RSS_FIELD_IPV6_EX
		       | IXGBE_MRQC_RSS_FIELD_IPV6
		       | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
		       | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
		       | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	}
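	/*
	 * RETA worked example: with indices = 4, j cycles 0..3 and j * 0x11
	 * produces the byte values 0x00, 0x11, 0x22, 0x33, so each 32-bit
	 * RETA write spreads four consecutive hash buckets across all four
	 * RSS queues.
	 */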
	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
	    adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
		/* Disable indicating checksum in descriptor, enables
		 * RSS hash */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}
	if (!(rxcsum & IXGBE_RXCSUM_PCSD)) {
		/* Enable IPv4 payload checksum for UDP fragments
		 * if PCSD is not set */
		rxcsum |= IXGBE_RXCSUM_IPPCSE;
	}

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
}

static void ixgbe_vlan_rx_register(struct net_device *netdev,
				   struct vlan_group *grp)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 ctrl;

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
		ctrl |= IXGBE_VLNCTRL_VME;
		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
	}

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_enable(adapter);
}

static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* add VID to filter table */
	ixgbe_set_vfta(&adapter->hw, vid, 0, true);
}

static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_disable(adapter);

	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_enable(adapter);

	/* remove VID from filter table */
	ixgbe_set_vfta(&adapter->hw, vid, 0, false);
}

static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
{
	ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}

static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
{
	struct dev_mc_list *mc_ptr;
	u8 *addr = *mc_addr_ptr;
	*vmdq = 0;

	mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
	if (mc_ptr->next)
		*mc_addr_ptr = mc_ptr->next->dmi_addr;
	else
		*mc_addr_ptr = NULL;

	return addr;
}
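/*
 * The iterator above hands ixgbe_update_{uc,mc}_addr_list one address per
 * call: container_of() recovers the dev_mc_list entry from its dmi_addr
 * member, then advances *mc_addr_ptr to the next entry's address, or NULL
 * at the end of the chain.
 */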
1723 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
1724 * @netdev: network interface device structure
1726 * The set_rx_method entry point is called whenever the unicast/multicast
1727 * address list or the network interface flags are updated. This routine is
1728 * responsible for configuring the hardware for proper unicast, multicast and
1729 * promiscuous mode.
1731 static void ixgbe_set_rx_mode(struct net_device *netdev)
1733 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1734 struct ixgbe_hw *hw = &adapter->hw;
1735 u32 fctrl, vlnctrl;
1736 u8 *addr_list = NULL;
1737 int addr_count = 0;
1739 /* Check for Promiscuous and All Multicast modes */
1741 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1742 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1744 if (netdev->flags & IFF_PROMISC) {
1745 hw->addr_ctrl.user_set_promisc = 1;
1746 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1747 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
1748 } else {
1749 if (netdev->flags & IFF_ALLMULTI) {
1750 fctrl |= IXGBE_FCTRL_MPE;
1751 fctrl &= ~IXGBE_FCTRL_UPE;
1752 } else {
1753 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1755 vlnctrl |= IXGBE_VLNCTRL_VFE;
1756 hw->addr_ctrl.user_set_promisc = 0;
1759 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1760 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1762 /* reprogram secondary unicast list */
1763 addr_count = netdev->uc_count;
1764 if (addr_count)
1765 addr_list = netdev->uc_list->dmi_addr;
1766 ixgbe_update_uc_addr_list(hw, addr_list, addr_count,
1767 ixgbe_addr_list_itr);
1769 /* reprogram multicast list */
1770 addr_count = netdev->mc_count;
1771 if (addr_count)
1772 addr_list = netdev->mc_list->dmi_addr;
1773 ixgbe_update_mc_addr_list(hw, addr_list, addr_count,
1774 ixgbe_addr_list_itr);
1777 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
1779 int q_idx;
1780 struct ixgbe_q_vector *q_vector;
1781 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1783 /* legacy and MSI only use one vector */
1784 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
1785 q_vectors = 1;
1787 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1788 q_vector = &adapter->q_vector[q_idx];
1789 if (!q_vector->rxr_count)
1790 continue;
1791 napi_enable(&q_vector->napi);
1795 static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
1797 int q_idx;
1798 struct ixgbe_q_vector *q_vector;
1799 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1801 /* legacy and MSI only use one vector */
1802 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
1803 q_vectors = 1;
1805 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1806 q_vector = &adapter->q_vector[q_idx];
1807 if (!q_vector->rxr_count)
1808 continue;
1809 napi_disable(&q_vector->napi);
1813 static void ixgbe_configure(struct ixgbe_adapter *adapter)
1815 struct net_device *netdev = adapter->netdev;
1816 int i;
1818 ixgbe_set_rx_mode(netdev);
1820 ixgbe_restore_vlan(adapter);
1822 ixgbe_configure_tx(adapter);
1823 ixgbe_configure_rx(adapter);
1824 for (i = 0; i < adapter->num_rx_queues; i++)
1825 ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
1826 (adapter->rx_ring[i].count - 1));
1829 static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
1831 struct net_device *netdev = adapter->netdev;
1832 struct ixgbe_hw *hw = &adapter->hw;
1833 int i, j = 0;
1834 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1835 u32 txdctl, rxdctl, mhadd;
1836 u32 gpie;
1838 ixgbe_get_hw_control(adapter);
1840 if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ||
1841 (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
1842 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1843 gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
1844 IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
1845 } else {
1846 /* MSI only */
1847 gpie = 0;
1849 /* XXX: to interrupt immediately for EICS writes, enable this */
1850 /* gpie |= IXGBE_GPIE_EIMEN; */
1851 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
1854 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
1855 /* legacy interrupts, use EIAM to auto-mask when reading EICR,
1856 * specifically only auto mask tx and rx interrupts */
1857 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1860 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
1861 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
1862 mhadd &= ~IXGBE_MHADD_MFS_MASK;
1863 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
1865 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
1868 for (i = 0; i < adapter->num_tx_queues; i++) {
1869 j = adapter->tx_ring[i].reg_idx;
1870 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
1871 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
1872 txdctl |= (8 << 16);
1873 txdctl |= IXGBE_TXDCTL_ENABLE;
1874 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
1877 for (i = 0; i < adapter->num_rx_queues; i++) {
1878 j = adapter->rx_ring[i].reg_idx;
1879 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
1880 /* enable PTHRESH=32 descriptors (half the internal cache)
1881 * and HTHRESH=0 descriptors (to minimize latency on fetch),
1882 * this also removes a pesky rx_no_buffer_count increment */
1883 rxdctl |= 0x0020;
1884 rxdctl |= IXGBE_RXDCTL_ENABLE;
1885 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl);
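/* For reference: 0x0020 is 32 decimal in the low-order PTHRESH bits of
 * RXDCTL, matching the comment above; HTHRESH (believed to sit in the
 * middle byte of the register) is deliberately left at zero. */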
1887 /* enable all receives */
1888 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1889 rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
1890 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxdctl);
1892 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
1893 ixgbe_configure_msix(adapter);
1894 else
1895 ixgbe_configure_msi_and_legacy(adapter);
1897 clear_bit(__IXGBE_DOWN, &adapter->state);
1898 ixgbe_napi_enable_all(adapter);
1900 /* clear any pending interrupts, may auto mask */
1901 IXGBE_READ_REG(hw, IXGBE_EICR);
1903 ixgbe_irq_enable(adapter);
1905 /* bring the link up in the watchdog, this could race with our first
1906 * link up interrupt but shouldn't be a problem */
1907 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
1908 adapter->link_check_timeout = jiffies;
1909 mod_timer(&adapter->watchdog_timer, jiffies);
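	/* From here the watchdog task owns link detection: it polls
	 * check_link() until the link is up or IXGBE_TRY_LINK_TIMEOUT
	 * passes, then re-arms the LSC interrupt (see
	 * ixgbe_watchdog_task below). */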
1910 return 0;
1913 void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
1915 WARN_ON(in_interrupt());
1916 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
1917 msleep(1);
1918 ixgbe_down(adapter);
1919 ixgbe_up(adapter);
1920 clear_bit(__IXGBE_RESETTING, &adapter->state);
1923 int ixgbe_up(struct ixgbe_adapter *adapter)
1925 /* hardware has been reset, we need to reload some things */
1926 ixgbe_configure(adapter);
1928 return ixgbe_up_complete(adapter);
1931 void ixgbe_reset(struct ixgbe_adapter *adapter)
1933 if (ixgbe_init_hw(&adapter->hw))
1934 DPRINTK(PROBE, ERR, "Hardware Error\n");
1936 /* reprogram the RAR[0] in case user changed it. */
1937 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
1941 #ifdef CONFIG_PM
1942 static int ixgbe_resume(struct pci_dev *pdev)
1944 struct net_device *netdev = pci_get_drvdata(pdev);
1945 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1946 int err;
1948 pci_set_power_state(pdev, PCI_D0);
1949 pci_restore_state(pdev);
1950 err = pci_enable_device(pdev);
1951 if (err) {
1952 printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
1953 "suspend\n");
1954 return err;
1956 pci_set_master(pdev);
1958 pci_enable_wake(pdev, PCI_D3hot, 0);
1959 pci_enable_wake(pdev, PCI_D3cold, 0);
1961 if (netif_running(netdev)) {
1962 err = ixgbe_request_irq(adapter);
1963 if (err)
1964 return err;
1967 ixgbe_reset(adapter);
1969 if (netif_running(netdev))
1970 ixgbe_up(adapter);
1972 netif_device_attach(netdev);
1974 return 0;
1976 #endif
1979 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
1980 * @adapter: board private structure
1981 * @rx_ring: ring to free buffers from
1983 static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
1984 struct ixgbe_ring *rx_ring)
1986 struct pci_dev *pdev = adapter->pdev;
1987 unsigned long size;
1988 unsigned int i;
1990 /* Free all the Rx ring sk_buffs */
1992 for (i = 0; i < rx_ring->count; i++) {
1993 struct ixgbe_rx_buffer *rx_buffer_info;
1995 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1996 if (rx_buffer_info->dma) {
1997 pci_unmap_single(pdev, rx_buffer_info->dma,
1998 rx_ring->rx_buf_len,
1999 PCI_DMA_FROMDEVICE);
2000 rx_buffer_info->dma = 0;
2002 if (rx_buffer_info->skb) {
2003 dev_kfree_skb(rx_buffer_info->skb);
2004 rx_buffer_info->skb = NULL;
2006 if (!rx_buffer_info->page)
2007 continue;
2008 pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE,
2009 PCI_DMA_FROMDEVICE);
2010 rx_buffer_info->page_dma = 0;
2012 put_page(rx_buffer_info->page);
2013 rx_buffer_info->page = NULL;
2016 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
2017 memset(rx_ring->rx_buffer_info, 0, size);
2019 /* Zero out the descriptor ring */
2020 memset(rx_ring->desc, 0, rx_ring->size);
2022 rx_ring->next_to_clean = 0;
2023 rx_ring->next_to_use = 0;
2025 writel(0, adapter->hw.hw_addr + rx_ring->head);
2026 writel(0, adapter->hw.hw_addr + rx_ring->tail);
2030 * ixgbe_clean_tx_ring - Free Tx Buffers
2031 * @adapter: board private structure
2032 * @tx_ring: ring to be cleaned
2034 static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
2035 struct ixgbe_ring *tx_ring)
2037 struct ixgbe_tx_buffer *tx_buffer_info;
2038 unsigned long size;
2039 unsigned int i;
2041 /* Free all the Tx ring sk_buffs */
2043 for (i = 0; i < tx_ring->count; i++) {
2044 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2045 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
2048 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
2049 memset(tx_ring->tx_buffer_info, 0, size);
2051 /* Zero out the descriptor ring */
2052 memset(tx_ring->desc, 0, tx_ring->size);
2054 tx_ring->next_to_use = 0;
2055 tx_ring->next_to_clean = 0;
2057 writel(0, adapter->hw.hw_addr + tx_ring->head);
2058 writel(0, adapter->hw.hw_addr + tx_ring->tail);
2062 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
2063 * @adapter: board private structure
2065 static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
2067 int i;
2069 for (i = 0; i < adapter->num_rx_queues; i++)
2070 ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2074 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
2075 * @adapter: board private structure
2077 static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
2079 int i;
2081 for (i = 0; i < adapter->num_tx_queues; i++)
2082 ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2085 void ixgbe_down(struct ixgbe_adapter *adapter)
2087 struct net_device *netdev = adapter->netdev;
2088 u32 rxctrl;
2090 /* signal that we are down to the interrupt handler */
2091 set_bit(__IXGBE_DOWN, &adapter->state);
2093 /* disable receives */
2094 rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
2095 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL,
2096 rxctrl & ~IXGBE_RXCTRL_RXEN);
2098 netif_tx_disable(netdev);
2100 /* disable transmits in the hardware */
2102 /* flush both disables */
2103 IXGBE_WRITE_FLUSH(&adapter->hw);
2104 msleep(10);
2106 ixgbe_irq_disable(adapter);
2108 ixgbe_napi_disable_all(adapter);
2109 del_timer_sync(&adapter->watchdog_timer);
2110 cancel_work_sync(&adapter->watchdog_task);
2112 netif_carrier_off(netdev);
2113 netif_tx_stop_all_queues(netdev);
2115 #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
2116 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
2117 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
2118 dca_remove_requester(&adapter->pdev->dev);
2121 #endif
2122 if (!pci_channel_offline(adapter->pdev))
2123 ixgbe_reset(adapter);
2124 ixgbe_clean_all_tx_rings(adapter);
2125 ixgbe_clean_all_rx_rings(adapter);
2127 #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
2128 /* since we reset the hardware DCA settings were cleared */
2129 if (dca_add_requester(&adapter->pdev->dev) == 0) {
2130 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
2131 /* always use CB2 mode, difference is masked
2132 * in the CB driver */
2133 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
2134 ixgbe_setup_dca(adapter);
2136 #endif
2139 static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
2141 struct net_device *netdev = pci_get_drvdata(pdev);
2142 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2143 #ifdef CONFIG_PM
2144 int retval = 0;
2145 #endif
2147 netif_device_detach(netdev);
2149 if (netif_running(netdev)) {
2150 ixgbe_down(adapter);
2151 ixgbe_free_irq(adapter);
2154 #ifdef CONFIG_PM
2155 retval = pci_save_state(pdev);
2156 if (retval)
2157 return retval;
2158 #endif
2160 pci_enable_wake(pdev, PCI_D3hot, 0);
2161 pci_enable_wake(pdev, PCI_D3cold, 0);
2163 ixgbe_release_hw_control(adapter);
2165 pci_disable_device(pdev);
2167 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2169 return 0;
2172 static void ixgbe_shutdown(struct pci_dev *pdev)
2174 ixgbe_suspend(pdev, PMSG_SUSPEND);
2178 * ixgbe_poll - NAPI Rx polling callback
2179 * @napi: structure for representing this polling device
2180 * @budget: how many packets driver is allowed to clean
2182 * This function is used for legacy and MSI, NAPI mode
2184 static int ixgbe_poll(struct napi_struct *napi, int budget)
2186 struct ixgbe_q_vector *q_vector = container_of(napi,
2187 struct ixgbe_q_vector, napi);
2188 struct ixgbe_adapter *adapter = q_vector->adapter;
2189 int tx_cleaned = 0, work_done = 0;
2191 #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
2192 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
2193 ixgbe_update_tx_dca(adapter, adapter->tx_ring);
2194 ixgbe_update_rx_dca(adapter, adapter->rx_ring);
2196 #endif
2198 tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
2199 ixgbe_clean_rx_irq(adapter, adapter->rx_ring, &work_done, budget);
2201 if (tx_cleaned)
2202 work_done = budget;
2204 /* If budget not fully consumed, exit the polling mode */
2205 if (work_done < budget) {
2206 netif_rx_complete(adapter->netdev, napi);
2207 if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
2208 ixgbe_set_itr(adapter);
2209 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2210 ixgbe_irq_enable(adapter);
2213 return work_done;
2217 * ixgbe_tx_timeout - Respond to a Tx Hang
2218 * @netdev: network interface device structure
2220 static void ixgbe_tx_timeout(struct net_device *netdev)
2222 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2224 /* Do the reset outside of interrupt context */
2225 schedule_work(&adapter->reset_task);
2228 static void ixgbe_reset_task(struct work_struct *work)
2230 struct ixgbe_adapter *adapter;
2231 adapter = container_of(work, struct ixgbe_adapter, reset_task);
2233 adapter->tx_timeout_count++;
2235 ixgbe_reinit_locked(adapter);
2238 static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
2239 int vectors)
2241 int err, vector_threshold;
2243 /* We'll want at least 3 (vector_threshold):
2244 * 1) TxQ[0] Cleanup
2245 * 2) RxQ[0] Cleanup
2246 * 3) Other (Link Status Change, etc.)
2247 * 4) TCP Timer (optional)
2249 vector_threshold = MIN_MSIX_COUNT;
2251 /* The more we get, the more we will assign to Tx/Rx Cleanup
2252 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
2253 * Right now, we simply care about how many we'll get; we'll
2254 * set them up later while requesting IRQs.
2256 while (vectors >= vector_threshold) {
2257 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2258 vectors);
2259 if (!err) /* Success in acquiring all requested vectors. */
2260 break;
2261 else if (err < 0)
2262 vectors = 0; /* Nasty failure, quit now */
2263 else /* err == number of vectors we should try again with */
2264 vectors = err;
2267 if (vectors < vector_threshold) {
2268 /* Can't allocate enough MSI-X interrupts? Oh well.
2269 * This just means we'll go with either a single MSI
2270 * vector or fall back to legacy interrupts.
2272 DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n");
2273 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2274 kfree(adapter->msix_entries);
2275 adapter->msix_entries = NULL;
2276 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
2277 adapter->num_tx_queues = 1;
2278 adapter->num_rx_queues = 1;
2279 } else {
2280 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
2281 adapter->num_msix_vectors = vectors;
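/* Worked example of the retry loop above: under the old pci_enable_msix()
 * contract, a positive return value is how many vectors the platform can
 * actually supply. Requesting 10 when only 6 are free returns 6, so the
 * next iteration retries with vectors = 6 and either succeeds or falls
 * below vector_threshold and bails out. */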
2285 static void __devinit ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
2287 int nrq, ntq;
2288 int feature_mask = 0, rss_i, rss_m;
2290 /* Number of supported queues */
2291 switch (adapter->hw.mac.type) {
2292 case ixgbe_mac_82598EB:
2293 rss_i = adapter->ring_feature[RING_F_RSS].indices;
2294 rss_m = 0;
2295 feature_mask |= IXGBE_FLAG_RSS_ENABLED;
2297 switch (adapter->flags & feature_mask) {
2298 case (IXGBE_FLAG_RSS_ENABLED):
2299 rss_m = 0xF;
2300 nrq = rss_i;
2301 ntq = rss_i;
2302 break;
2303 case 0:
2304 default:
2305 rss_i = 0;
2306 rss_m = 0;
2307 nrq = 1;
2308 ntq = 1;
2309 break;
2312 adapter->ring_feature[RING_F_RSS].indices = rss_i;
2313 adapter->ring_feature[RING_F_RSS].mask = rss_m;
2314 break;
2315 default:
2316 nrq = 1;
2317 ntq = 1;
2318 break;
2321 adapter->num_rx_queues = nrq;
2322 adapter->num_tx_queues = ntq;
2326 * ixgbe_cache_ring_register - Descriptor ring to register mapping
2327 * @adapter: board private structure to initialize
2329 * Once we know the feature-set enabled for the device, we'll cache
2330 * the register offset the descriptor ring is assigned to.
2332 static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
2334 /* TODO: Remove all uses of the indices in the cases where multiple
2335 * features are OR'd together, if the feature set makes sense.
2337 int feature_mask = 0, rss_i;
2338 int i, txr_idx, rxr_idx;
2340 /* Number of supported queues */
2341 switch (adapter->hw.mac.type) {
2342 case ixgbe_mac_82598EB:
2343 rss_i = adapter->ring_feature[RING_F_RSS].indices;
2344 txr_idx = 0;
2345 rxr_idx = 0;
2346 feature_mask |= IXGBE_FLAG_RSS_ENABLED;
2347 switch (adapter->flags & feature_mask) {
2348 case (IXGBE_FLAG_RSS_ENABLED):
2349 for (i = 0; i < adapter->num_rx_queues; i++)
2350 adapter->rx_ring[i].reg_idx = i;
2351 for (i = 0; i < adapter->num_tx_queues; i++)
2352 adapter->tx_ring[i].reg_idx = i;
2353 break;
2354 case 0:
2355 default:
2356 break;
2358 break;
2359 default:
2360 break;
2365 * ixgbe_alloc_queues - Allocate memory for all rings
2366 * @adapter: board private structure to initialize
2368 * We allocate one ring per queue at run-time since we don't know the
2369 * number of queues at compile-time.
2372 static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
2374 int i;
2376 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
2377 sizeof(struct ixgbe_ring), GFP_KERNEL);
2378 if (!adapter->tx_ring)
2379 goto err_tx_ring_allocation;
2381 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
2382 sizeof(struct ixgbe_ring), GFP_KERNEL);
2383 if (!adapter->rx_ring)
2384 goto err_rx_ring_allocation;
2386 for (i = 0; i < adapter->num_tx_queues; i++) {
2387 adapter->tx_ring[i].count = IXGBE_DEFAULT_TXD;
2388 adapter->tx_ring[i].queue_index = i;
2390 for (i = 0; i < adapter->num_rx_queues; i++) {
2391 adapter->rx_ring[i].count = IXGBE_DEFAULT_RXD;
2392 adapter->rx_ring[i].queue_index = i;
2395 ixgbe_cache_ring_register(adapter);
2397 return 0;
2399 err_rx_ring_allocation:
2400 kfree(adapter->tx_ring);
2401 err_tx_ring_allocation:
2402 return -ENOMEM;
2406 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
2407 * @adapter: board private structure to initialize
2409 * Attempt to configure the interrupts using the best available
2410 * capabilities of the hardware and the kernel.
2412 static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
2413 *adapter)
2415 int err = 0;
2416 int vector, v_budget;
2419 * Set the default interrupt throttle rate.
2421 adapter->rx_eitr = (1000000 / IXGBE_DEFAULT_ITR_RX_USECS);
2422 adapter->tx_eitr = (1000000 / IXGBE_DEFAULT_ITR_TX_USECS);
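	/* Note the unit conversion: *_eitr is stored as interrupts per
	 * second, derived from a microsecond interval as 1000000 / usecs.
	 * For example, a 125 usec default (purely illustrative) would
	 * store 8000 here. */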
2425 * It's easy to be greedy for MSI-X vectors, but it really
2426 * doesn't do us much good if we have a lot more vectors
2427 * than CPUs. So let's be conservative and only ask for
2428 * (roughly) twice as many vectors as there are CPUs.
2430 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
2431 (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
2434 * At the same time, hardware can only support a maximum of
2435 * MAX_MSIX_COUNT vectors. With features such as RSS and VMDq,
2436 * we can easily reach upwards of 64 Rx descriptor queues and
2437 * 32 Tx queues. Thus, we cap it off in those rare cases where
2438 * the cpu count also exceeds our vector limit.
2440 v_budget = min(v_budget, MAX_MSIX_COUNT);
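	/* Worked example: on a 4-CPU box with 4 Rx and 4 Tx queues this
	 * is min(4 + 4, 4 * 2) + NON_Q_VECTORS, i.e. 8 plus the non-queue
	 * vectors, which MAX_MSIX_COUNT then caps if necessary. */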
2442 /* A failure in MSI-X entry allocation isn't fatal, but it does
2443 * mean we disable MSI-X capabilities of the adapter. */
2444 adapter->msix_entries = kcalloc(v_budget,
2445 sizeof(struct msix_entry), GFP_KERNEL);
2446 if (!adapter->msix_entries) {
2447 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
2448 ixgbe_set_num_queues(adapter);
2449 kfree(adapter->tx_ring);
2450 kfree(adapter->rx_ring);
2451 err = ixgbe_alloc_queues(adapter);
2452 if (err) {
2453 DPRINTK(PROBE, ERR, "Unable to allocate memory "
2454 "for queues\n");
2455 goto out;
2458 goto try_msi;
2461 for (vector = 0; vector < v_budget; vector++)
2462 adapter->msix_entries[vector].entry = vector;
2464 ixgbe_acquire_msix_vectors(adapter, v_budget);
2466 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
2467 goto out;
2469 try_msi:
2470 err = pci_enable_msi(adapter->pdev);
2471 if (!err) {
2472 adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
2473 } else {
2474 DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
2475 "falling back to legacy. Error: %d\n", err);
2476 /* reset err */
2477 err = 0;
2480 out:
2481 /* Notify the stack of the (possibly) reduced Tx Queue count. */
2482 adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
2484 return err;
2487 static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
2489 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2490 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2491 pci_disable_msix(adapter->pdev);
2492 kfree(adapter->msix_entries);
2493 adapter->msix_entries = NULL;
2494 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
2495 adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
2496 pci_disable_msi(adapter->pdev);
2502 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
2503 * @adapter: board private structure to initialize
2505 * We determine which interrupt scheme to use based on...
2506 * - Kernel support (MSI, MSI-X)
2507 * - which can be user-defined (via MODULE_PARAM)
2508 * - Hardware queue count (num_*_queues)
2509 * - defined by miscellaneous hardware support/features (RSS, etc.)
2511 static int __devinit ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
2513 int err;
2515 /* Number of supported queues */
2516 ixgbe_set_num_queues(adapter);
2518 err = ixgbe_alloc_queues(adapter);
2519 if (err) {
2520 DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
2521 goto err_alloc_queues;
2524 err = ixgbe_set_interrupt_capability(adapter);
2525 if (err) {
2526 DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
2527 goto err_set_interrupt;
2530 DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
2531 "Tx Queue count = %u\n",
2532 (adapter->num_rx_queues > 1) ? "Enabled" :
2533 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
2535 set_bit(__IXGBE_DOWN, &adapter->state);
2537 return 0;
2539 err_set_interrupt:
2540 kfree(adapter->tx_ring);
2541 kfree(adapter->rx_ring);
2542 err_alloc_queues:
2543 return err;
2547 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
2548 * @adapter: board private structure to initialize
2550 * ixgbe_sw_init initializes the Adapter private data structure.
2551 * Fields are initialized based on PCI device information and
2552 * OS network device settings (MTU size).
2554 static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
2556 struct ixgbe_hw *hw = &adapter->hw;
2557 struct pci_dev *pdev = adapter->pdev;
2558 unsigned int rss;
2560 /* Set capability flags */
2561 rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
2562 adapter->ring_feature[RING_F_RSS].indices = rss;
2563 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
2565 /* Enable Dynamic interrupt throttling by default */
2566 adapter->rx_eitr = 1;
2567 adapter->tx_eitr = 1;
2569 /* default flow control settings */
2570 hw->fc.original_type = ixgbe_fc_none;
2571 hw->fc.type = ixgbe_fc_none;
2572 hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
2573 hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
2574 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
2575 hw->fc.send_xon = true;
2577 /* select 10G link by default */
2578 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
2579 if (hw->mac.ops.reset(hw)) {
2580 dev_err(&pdev->dev, "HW Init failed\n");
2581 return -EIO;
2583 if (hw->mac.ops.setup_link_speed(hw, IXGBE_LINK_SPEED_10GB_FULL, true,
2584 false)) {
2585 dev_err(&pdev->dev, "Link Speed setup failed\n");
2586 return -EIO;
2589 /* initialize eeprom parameters */
2590 if (ixgbe_init_eeprom(hw)) {
2591 dev_err(&pdev->dev, "EEPROM initialization failed\n");
2592 return -EIO;
2595 /* enable rx csum by default */
2596 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
2598 set_bit(__IXGBE_DOWN, &adapter->state);
2600 return 0;
2604 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
2605 * @adapter: board private structure
2606 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2608 * Return 0 on success, negative on failure
2610 int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
2611 struct ixgbe_ring *tx_ring)
2613 struct pci_dev *pdev = adapter->pdev;
2614 int size;
2616 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
2617 tx_ring->tx_buffer_info = vmalloc(size);
2618 if (!tx_ring->tx_buffer_info)
2619 goto err;
2620 memset(tx_ring->tx_buffer_info, 0, size);
2622 /* round up to nearest 4K */
2623 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc) +
2624 sizeof(u32);
2625 tx_ring->size = ALIGN(tx_ring->size, 4096);
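	/* Worked sizing example (illustrative): union ixgbe_adv_tx_desc
	 * is 16 bytes, so a 512-entry ring needs 512 * 16 = 8192 bytes;
	 * the extra sizeof(u32) reserved above makes that 8196, which
	 * ALIGN() rounds up to the next 4K boundary, 12288. */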
2627 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
2628 &tx_ring->dma);
2629 if (!tx_ring->desc)
2630 goto err;
2632 tx_ring->next_to_use = 0;
2633 tx_ring->next_to_clean = 0;
2634 tx_ring->work_limit = tx_ring->count;
2635 return 0;
2637 err:
2638 vfree(tx_ring->tx_buffer_info);
2639 tx_ring->tx_buffer_info = NULL;
2640 DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit "
2641 "descriptor ring\n");
2642 return -ENOMEM;
2646 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
2647 * @adapter: board private structure
2648 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2650 * Returns 0 on success, negative on failure
2652 int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
2653 struct ixgbe_ring *rx_ring)
2655 struct pci_dev *pdev = adapter->pdev;
2656 int size;
2658 size = sizeof(struct net_lro_desc) * IXGBE_MAX_LRO_DESCRIPTORS;
2659 rx_ring->lro_mgr.lro_arr = vmalloc(size);
2660 if (!rx_ring->lro_mgr.lro_arr)
2661 return -ENOMEM;
2662 memset(rx_ring->lro_mgr.lro_arr, 0, size);
2664 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
2665 rx_ring->rx_buffer_info = vmalloc(size);
2666 if (!rx_ring->rx_buffer_info) {
2667 DPRINTK(PROBE, ERR,
2668 "vmalloc allocation failed for the rx desc ring\n");
2669 goto alloc_failed;
2671 memset(rx_ring->rx_buffer_info, 0, size);
2673 /* Round up to nearest 4K */
2674 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2675 rx_ring->size = ALIGN(rx_ring->size, 4096);
2677 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma);
2679 if (!rx_ring->desc) {
2680 DPRINTK(PROBE, ERR,
2681 "Memory allocation failed for the rx desc ring\n");
2682 vfree(rx_ring->rx_buffer_info);
2683 goto alloc_failed;
2686 rx_ring->next_to_clean = 0;
2687 rx_ring->next_to_use = 0;
2689 return 0;
2691 alloc_failed:
2692 vfree(rx_ring->lro_mgr.lro_arr);
2693 rx_ring->lro_mgr.lro_arr = NULL;
2694 return -ENOMEM;
2698 * ixgbe_free_tx_resources - Free Tx Resources per Queue
2699 * @adapter: board private structure
2700 * @tx_ring: Tx descriptor ring for a specific queue
2702 * Free all transmit software resources
2704 static void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
2705 struct ixgbe_ring *tx_ring)
2707 struct pci_dev *pdev = adapter->pdev;
2709 ixgbe_clean_tx_ring(adapter, tx_ring);
2711 vfree(tx_ring->tx_buffer_info);
2712 tx_ring->tx_buffer_info = NULL;
2714 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
2716 tx_ring->desc = NULL;
2720 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
2721 * @adapter: board private structure
2723 * Free all transmit software resources
2725 static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
2727 int i;
2729 for (i = 0; i < adapter->num_tx_queues; i++)
2730 ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
2734 * ixgbe_free_rx_resources - Free Rx Resources
2735 * @adapter: board private structure
2736 * @rx_ring: ring to clean the resources from
2738 * Free all receive software resources
2740 static void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
2741 struct ixgbe_ring *rx_ring)
2743 struct pci_dev *pdev = adapter->pdev;
2745 vfree(rx_ring->lro_mgr.lro_arr);
2746 rx_ring->lro_mgr.lro_arr = NULL;
2748 ixgbe_clean_rx_ring(adapter, rx_ring);
2750 vfree(rx_ring->rx_buffer_info);
2751 rx_ring->rx_buffer_info = NULL;
2753 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
2755 rx_ring->desc = NULL;
2759 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
2760 * @adapter: board private structure
2762 * Free all receive software resources
2764 static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
2766 int i;
2768 for (i = 0; i < adapter->num_rx_queues; i++)
2769 ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
2773 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
2774 * @adapter: board private structure
2776 * If this function returns with an error, then it's possible one or
2777 * more of the rings are populated (while the rest are not). It is the
2778 * caller's duty to clean those orphaned rings.
2780 * Return 0 on success, negative on failure
2782 static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
2784 int i, err = 0;
2786 for (i = 0; i < adapter->num_tx_queues; i++) {
2787 err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
2788 if (err) {
2789 DPRINTK(PROBE, ERR,
2790 "Allocation for Tx Queue %u failed\n", i);
2791 break;
2795 return err;
2799 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
2800 * @adapter: board private structure
2802 * If this function returns with an error, then it's possible one or
2803 * more of the rings are populated (while the rest are not). It is the
2804 * caller's duty to clean those orphaned rings.
2806 * Return 0 on success, negative on failure
2809 static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
2811 int i, err = 0;
2813 for (i = 0; i < adapter->num_rx_queues; i++) {
2814 err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
2815 if (err) {
2816 DPRINTK(PROBE, ERR,
2817 "Allocation for Rx Queue %u failed\n", i);
2818 break;
2822 return err;
2826 * ixgbe_change_mtu - Change the Maximum Transfer Unit
2827 * @netdev: network interface device structure
2828 * @new_mtu: new value for maximum frame size
2830 * Returns 0 on success, negative on failure
2832 static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
2834 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2835 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2837 if ((max_frame < (ETH_ZLEN + ETH_FCS_LEN)) ||
2838 (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
2839 return -EINVAL;
2841 DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
2842 netdev->mtu, new_mtu);
2843 /* must set new MTU before calling down or up */
2844 netdev->mtu = new_mtu;
2846 if (netif_running(netdev))
2847 ixgbe_reinit_locked(adapter);
2849 return 0;
2853 * ixgbe_open - Called when a network interface is made active
2854 * @netdev: network interface device structure
2856 * Returns 0 on success, negative value on failure
2858 * The open entry point is called when a network interface is made
2859 * active by the system (IFF_UP). At this point all resources needed
2860 * for transmit and receive operations are allocated, the interrupt
2861 * handler is registered with the OS, the watchdog timer is started,
2862 * and the stack is notified that the interface is ready.
2864 static int ixgbe_open(struct net_device *netdev)
2866 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2867 int err;
2869 /* disallow open during test */
2870 if (test_bit(__IXGBE_TESTING, &adapter->state))
2871 return -EBUSY;
2873 /* allocate transmit descriptors */
2874 err = ixgbe_setup_all_tx_resources(adapter);
2875 if (err)
2876 goto err_setup_tx;
2878 /* allocate receive descriptors */
2879 err = ixgbe_setup_all_rx_resources(adapter);
2880 if (err)
2881 goto err_setup_rx;
2883 ixgbe_configure(adapter);
2885 err = ixgbe_request_irq(adapter);
2886 if (err)
2887 goto err_req_irq;
2889 err = ixgbe_up_complete(adapter);
2890 if (err)
2891 goto err_up;
2893 netif_tx_start_all_queues(netdev);
2895 return 0;
2897 err_up:
2898 ixgbe_release_hw_control(adapter);
2899 ixgbe_free_irq(adapter);
2900 err_req_irq:
2901 ixgbe_free_all_rx_resources(adapter);
2902 err_setup_rx:
2903 ixgbe_free_all_tx_resources(adapter);
2904 err_setup_tx:
2905 ixgbe_reset(adapter);
2907 return err;
2911 * ixgbe_close - Disables a network interface
2912 * @netdev: network interface device structure
2914 * Returns 0, this is not allowed to fail
2916 * The close entry point is called when an interface is de-activated
2917 * by the OS. The hardware is still under the driver's control, but
2918 * needs to be disabled. A global MAC reset is issued to stop the
2919 * hardware, and all transmit and receive resources are freed.
2921 static int ixgbe_close(struct net_device *netdev)
2923 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2925 ixgbe_down(adapter);
2926 ixgbe_free_irq(adapter);
2928 ixgbe_free_all_tx_resources(adapter);
2929 ixgbe_free_all_rx_resources(adapter);
2931 ixgbe_release_hw_control(adapter);
2933 return 0;
2937 * ixgbe_update_stats - Update the board statistics counters.
2938 * @adapter: board private structure
2940 void ixgbe_update_stats(struct ixgbe_adapter *adapter)
2942 struct ixgbe_hw *hw = &adapter->hw;
2943 u64 total_mpc = 0;
2944 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
2946 adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
2947 for (i = 0; i < 8; i++) {
2948 /* for packet buffers not used, the register should read 0 */
2949 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
2950 missed_rx += mpc;
2951 adapter->stats.mpc[i] += mpc;
2952 total_mpc += adapter->stats.mpc[i];
2953 adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
2955 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
2956 /* work around hardware counting issue */
2957 adapter->stats.gprc -= missed_rx;
2959 /* 82598 hardware only has a 32 bit counter in the high register */
2960 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
2961 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
2962 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
2963 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
2964 adapter->stats.bprc += bprc;
2965 adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
2966 adapter->stats.mprc -= bprc;
2967 adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
2968 adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
2969 adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
2970 adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
2971 adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
2972 adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
2973 adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
2974 adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
2975 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
2976 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
2977 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
2978 adapter->stats.lxontxc += lxon;
2979 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
2980 adapter->stats.lxofftxc += lxoff;
2981 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
2982 adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
2983 adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
2985 * 82598 errata - tx of flow control packets is included in tx counters
2987 xon_off_tot = lxon + lxoff;
2988 adapter->stats.gptc -= xon_off_tot;
2989 adapter->stats.mptc -= xon_off_tot;
2990 adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
2991 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
2992 adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
2993 adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
2994 adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
2995 adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
2996 adapter->stats.ptc64 -= xon_off_tot;
2997 adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
2998 adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
2999 adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3000 adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3001 adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3002 adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3004 /* Fill out the OS statistics structure */
3005 adapter->net_stats.multicast = adapter->stats.mprc;
3007 /* Rx Errors */
3008 adapter->net_stats.rx_errors = adapter->stats.crcerrs +
3009 adapter->stats.rlec;
3010 adapter->net_stats.rx_dropped = 0;
3011 adapter->net_stats.rx_length_errors = adapter->stats.rlec;
3012 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
3013 adapter->net_stats.rx_missed_errors = total_mpc;
3017 * ixgbe_watchdog - Timer Call-back
3018 * @data: pointer to adapter cast into an unsigned long
3020 static void ixgbe_watchdog(unsigned long data)
3022 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
3023 struct ixgbe_hw *hw = &adapter->hw;
3025 /* Do the watchdog outside of interrupt context due to the lovely
3026 * delays that some of the newer hardware requires */
3027 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
3028 /* Cause software interrupt to ensure rx rings are cleaned */
3029 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3030 u32 eics =
3031 (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1;
3032 IXGBE_WRITE_REG(hw, IXGBE_EICS, eics);
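			/* Worked example: if num_msix_vectors were 10 with
			 * NON_Q_VECTORS == 2 (values for illustration only),
			 * eics = (1 << 8) - 1 = 0xff, poking a software
			 * interrupt on every queue vector so each ring gets
			 * cleaned. */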
3033 } else {
3034 /* For legacy and MSI interrupts don't set any bits that
3035 * are enabled for EIAM, because this operation would
3036 * set *both* EIMS and EICS for any bit in EIAM */
3037 IXGBE_WRITE_REG(hw, IXGBE_EICS,
3038 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
3040 /* Reset the timer */
3041 mod_timer(&adapter->watchdog_timer,
3042 round_jiffies(jiffies + 2 * HZ));
3045 schedule_work(&adapter->watchdog_task);
3049 * ixgbe_watchdog_task - worker thread to bring link up
3050 * @work: pointer to work_struct containing our data
3052 static void ixgbe_watchdog_task(struct work_struct *work)
3054 struct ixgbe_adapter *adapter = container_of(work,
3055 struct ixgbe_adapter,
3056 watchdog_task);
3057 struct net_device *netdev = adapter->netdev;
3058 struct ixgbe_hw *hw = &adapter->hw;
3059 u32 link_speed = adapter->link_speed;
3060 bool link_up = adapter->link_up;
3062 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
3064 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
3065 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
3066 if (link_up ||
3067 time_after(jiffies, (adapter->link_check_timeout +
3068 IXGBE_TRY_LINK_TIMEOUT))) {
3069 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
3070 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
3072 adapter->link_up = link_up;
3073 adapter->link_speed = link_speed;
3076 if (link_up) {
3077 if (!netif_carrier_ok(netdev)) {
3078 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3079 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
3080 #define FLOW_RX (frctl & IXGBE_FCTRL_RFCE)
3081 #define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X)
3082 DPRINTK(LINK, INFO, "NIC Link is Up %s, "
3083 "Flow Control: %s\n",
3084 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
3085 "10 Gbps" :
3086 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
3087 "1 Gbps" : "unknown speed")),
3088 ((FLOW_RX && FLOW_TX) ? "RX/TX" :
3089 (FLOW_RX ? "RX" :
3090 (FLOW_TX ? "TX" : "None"))));
3092 netif_carrier_on(netdev);
3093 netif_tx_wake_all_queues(netdev);
3094 } else {
3095 /* Force detection of hung controller */
3096 adapter->detect_tx_hung = true;
3098 } else {
3099 adapter->link_up = false;
3100 adapter->link_speed = 0;
3101 if (netif_carrier_ok(netdev)) {
3102 DPRINTK(LINK, INFO, "NIC Link is Down\n");
3103 netif_carrier_off(netdev);
3104 netif_tx_stop_all_queues(netdev);
3108 ixgbe_update_stats(adapter);
3109 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
3112 static int ixgbe_tso(struct ixgbe_adapter *adapter,
3113 struct ixgbe_ring *tx_ring, struct sk_buff *skb,
3114 u32 tx_flags, u8 *hdr_len)
3116 struct ixgbe_adv_tx_context_desc *context_desc;
3117 unsigned int i;
3118 int err;
3119 struct ixgbe_tx_buffer *tx_buffer_info;
3120 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
3121 u32 mss_l4len_idx = 0, l4len;
3123 if (skb_is_gso(skb)) {
3124 if (skb_header_cloned(skb)) {
3125 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3126 if (err)
3127 return err;
3129 l4len = tcp_hdrlen(skb);
3130 *hdr_len += l4len;
3132 if (skb->protocol == htons(ETH_P_IP)) {
3133 struct iphdr *iph = ip_hdr(skb);
3134 iph->tot_len = 0;
3135 iph->check = 0;
3136 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
3137 iph->daddr, 0,
3138 IPPROTO_TCP, 0);
3140 adapter->hw_tso_ctxt++;
3141 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
3142 ipv6_hdr(skb)->payload_len = 0;
3143 tcp_hdr(skb)->check =
3144 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3145 &ipv6_hdr(skb)->daddr,
3146 0, IPPROTO_TCP, 0);
3147 adapter->hw_tso6_ctxt++;
3150 i = tx_ring->next_to_use;
3152 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3153 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
3155 /* VLAN MACLEN IPLEN */
3156 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3157 vlan_macip_lens |=
3158 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
3159 vlan_macip_lens |= ((skb_network_offset(skb)) <<
3160 IXGBE_ADVTXD_MACLEN_SHIFT);
3161 *hdr_len += skb_network_offset(skb);
3162 vlan_macip_lens |=
3163 (skb_transport_header(skb) - skb_network_header(skb));
3164 *hdr_len +=
3165 (skb_transport_header(skb) - skb_network_header(skb));
3166 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
3167 context_desc->seqnum_seed = 0;
3169 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3170 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
3171 IXGBE_ADVTXD_DTYP_CTXT);
3173 if (skb->protocol == htons(ETH_P_IP))
3174 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
3175 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
3176 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
3178 /* MSS L4LEN IDX */
3179 mss_l4len_idx |=
3180 (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
3181 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
3182 /* use index 1 for TSO */
3183 mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
3184 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3186 tx_buffer_info->time_stamp = jiffies;
3187 tx_buffer_info->next_to_watch = i;
3189 i++;
3190 if (i == tx_ring->count)
3191 i = 0;
3192 tx_ring->next_to_use = i;
3194 return true;
3196 return false;
3199 static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
3200 struct ixgbe_ring *tx_ring,
3201 struct sk_buff *skb, u32 tx_flags)
3203 struct ixgbe_adv_tx_context_desc *context_desc;
3204 unsigned int i;
3205 struct ixgbe_tx_buffer *tx_buffer_info;
3206 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
3208 if (skb->ip_summed == CHECKSUM_PARTIAL ||
3209 (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
3210 i = tx_ring->next_to_use;
3211 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3212 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
3214 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3215 vlan_macip_lens |=
3216 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
3217 vlan_macip_lens |= (skb_network_offset(skb) <<
3218 IXGBE_ADVTXD_MACLEN_SHIFT);
3219 if (skb->ip_summed == CHECKSUM_PARTIAL)
3220 vlan_macip_lens |= (skb_transport_header(skb) -
3221 skb_network_header(skb));
3223 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
3224 context_desc->seqnum_seed = 0;
3226 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
3227 IXGBE_ADVTXD_DTYP_CTXT);
3229 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3230 switch (skb->protocol) {
3231 case __constant_htons(ETH_P_IP):
3232 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
3233 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3234 type_tucmd_mlhl |=
3235 IXGBE_ADVTXD_TUCMD_L4T_TCP;
3236 break;
3238 case __constant_htons(ETH_P_IPV6):
3239 /* XXX what about other V6 headers?? */
3240 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3241 type_tucmd_mlhl |=
3242 IXGBE_ADVTXD_TUCMD_L4T_TCP;
3243 break;
3245 default:
3246 if (unlikely(net_ratelimit())) {
3247 DPRINTK(PROBE, WARNING,
3248 "partial checksum but proto=%x!\n",
3249 skb->protocol);
3251 break;
3255 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
3256 /* use index zero for tx checksum offload */
3257 context_desc->mss_l4len_idx = 0;
3259 tx_buffer_info->time_stamp = jiffies;
3260 tx_buffer_info->next_to_watch = i;
3261 adapter->hw_csum_tx_good++;
3262 i++;
3263 if (i == tx_ring->count)
3264 i = 0;
3265 tx_ring->next_to_use = i;
3267 return true;
3269 return false;
3272 static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
3273 struct ixgbe_ring *tx_ring,
3274 struct sk_buff *skb, unsigned int first)
3276 struct ixgbe_tx_buffer *tx_buffer_info;
3277 unsigned int len = skb->len;
3278 unsigned int offset = 0, size, count = 0, i;
3279 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
3280 unsigned int f;
3282 len -= skb->data_len;
3284 i = tx_ring->next_to_use;
3286 while (len) {
3287 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3288 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
3290 tx_buffer_info->length = size;
3291 tx_buffer_info->dma = pci_map_single(adapter->pdev,
3292 skb->data + offset,
3293 size, PCI_DMA_TODEVICE);
3294 tx_buffer_info->time_stamp = jiffies;
3295 tx_buffer_info->next_to_watch = i;
3297 len -= size;
3298 offset += size;
3299 count++;
3300 i++;
3301 if (i == tx_ring->count)
3302 i = 0;
3305 for (f = 0; f < nr_frags; f++) {
3306 struct skb_frag_struct *frag;
3308 frag = &skb_shinfo(skb)->frags[f];
3309 len = frag->size;
3310 offset = frag->page_offset;
3312 while (len) {
3313 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3314 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
3316 tx_buffer_info->length = size;
3317 tx_buffer_info->dma = pci_map_page(adapter->pdev,
3318 frag->page,
3319 offset,
3320 size, PCI_DMA_TODEVICE);
3321 tx_buffer_info->time_stamp = jiffies;
3322 tx_buffer_info->next_to_watch = i;
3324 len -= size;
3325 offset += size;
3326 count++;
3327 i++;
3328 if (i == tx_ring->count)
3329 i = 0;
3332 if (i == 0)
3333 i = tx_ring->count - 1;
3334 else
3335 i = i - 1;
3336 tx_ring->tx_buffer_info[i].skb = skb;
3337 tx_ring->tx_buffer_info[first].next_to_watch = i;
3339 return count;
3342 static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
3343 struct ixgbe_ring *tx_ring,
3344 int tx_flags, int count, u32 paylen, u8 hdr_len)
3346 union ixgbe_adv_tx_desc *tx_desc = NULL;
3347 struct ixgbe_tx_buffer *tx_buffer_info;
3348 u32 olinfo_status = 0, cmd_type_len = 0;
3349 unsigned int i;
3350 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
3352 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
3354 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
3356 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3357 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
3359 if (tx_flags & IXGBE_TX_FLAGS_TSO) {
3360 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
3362 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
3363 IXGBE_ADVTXD_POPTS_SHIFT;
3365 /* use index 1 context for tso */
3366 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
3367 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3368 olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
3369 IXGBE_ADVTXD_POPTS_SHIFT;
3371 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
3372 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
3373 IXGBE_ADVTXD_POPTS_SHIFT;
3375 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
3377 i = tx_ring->next_to_use;
3378 while (count--) {
3379 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3380 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
3381 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
3382 tx_desc->read.cmd_type_len =
3383 cpu_to_le32(cmd_type_len | tx_buffer_info->length);
3384 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3386 i++;
3387 if (i == tx_ring->count)
3388 i = 0;
3391 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
3394 * Force memory writes to complete before letting h/w
3395 * know there are new descriptors to fetch. (Only
3396 * applicable for weak-ordered memory model archs,
3397 * such as IA-64).
3399 wmb();
3401 tx_ring->next_to_use = i;
3402 writel(i, adapter->hw.hw_addr + tx_ring->tail);
3405 static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
3406 struct ixgbe_ring *tx_ring, int size)
3408 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3410 netif_stop_subqueue(netdev, tx_ring->queue_index);
3411 /* Herbert's original patch had:
3412 * smp_mb__after_netif_stop_queue();
3413 * but since that doesn't exist yet, just open code it. */
3414 smp_mb();
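	/* The barrier orders our stop-queue store before the re-read of
	 * the ring state below; it pairs with the Tx cleanup path, which
	 * frees descriptors and then wakes the queue. Without it both
	 * CPUs could miss each other's update and leave the queue
	 * stopped forever. */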
3416 /* We need to check again in a case another CPU has just
3417 * made room available. */
3418 if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
3419 return -EBUSY;
3421 /* A reprieve! - use start_queue because it doesn't call schedule */
3422 netif_start_subqueue(netdev, tx_ring->queue_index);
3423 ++adapter->restart_queue;
3424 return 0;
3427 static int ixgbe_maybe_stop_tx(struct net_device *netdev,
3428 struct ixgbe_ring *tx_ring, int size)
3430 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
3431 return 0;
3432 return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
3436 static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3438 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3439 struct ixgbe_ring *tx_ring;
3440 unsigned int len = skb->len;
3441 unsigned int first;
3442 unsigned int tx_flags = 0;
3443 u8 hdr_len = 0;
3444 int r_idx = 0, tso;
3445 unsigned int mss = 0;
3446 int count = 0;
3447 unsigned int f;
3448 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
3449 len -= skb->data_len;
3450 r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping;
3451 tx_ring = &adapter->tx_ring[r_idx];
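	/* The AND above always yields an in-range index, but it only
	 * behaves like a true modulo when num_tx_queues is a power of
	 * two; e.g. with 6 queues the mask is 5 (0b101) and queues 2 and
	 * 3 are never selected. */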
3454 if (skb->len <= 0) {
3455 dev_kfree_skb(skb);
3456 return NETDEV_TX_OK;
3458 mss = skb_shinfo(skb)->gso_size;
3460 if (mss)
3461 count++;
3462 else if (skb->ip_summed == CHECKSUM_PARTIAL)
3463 count++;
3465 count += TXD_USE_COUNT(len);
3466 for (f = 0; f < nr_frags; f++)
3467 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
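	/* Descriptor budgeting, for illustration: TXD_USE_COUNT() divides
	 * a buffer length by the per-descriptor data limit, rounding up,
	 * so a 20KB fragment against a 16KB limit (the usual
	 * IXGBE_MAX_DATA_PER_TXD, not verified here) costs two
	 * descriptors; the count++ above adds one more for the context
	 * descriptor when TSO or checksum offload is in use. */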
3469 if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
3470 adapter->tx_busy++;
3471 return NETDEV_TX_BUSY;
3473 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
3474 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3475 tx_flags |= (vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT);
3478 if (skb->protocol == htons(ETH_P_IP))
3479 tx_flags |= IXGBE_TX_FLAGS_IPV4;
3480 first = tx_ring->next_to_use;
3481 tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
3482 if (tso < 0) {
3483 dev_kfree_skb_any(skb);
3484 return NETDEV_TX_OK;
3487 if (tso)
3488 tx_flags |= IXGBE_TX_FLAGS_TSO;
3489 else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
3490 (skb->ip_summed == CHECKSUM_PARTIAL))
3491 tx_flags |= IXGBE_TX_FLAGS_CSUM;
3493 ixgbe_tx_queue(adapter, tx_ring, tx_flags,
3494 ixgbe_tx_map(adapter, tx_ring, skb, first),
3495 skb->len, hdr_len);
3497 netdev->trans_start = jiffies;
3499 ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
3501 return NETDEV_TX_OK;
3505 * ixgbe_get_stats - Get System Network Statistics
3506 * @netdev: network interface device structure
3508 * Returns the address of the device statistics structure.
3509 * The statistics are actually updated from the timer callback.
3511 static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
3513 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3515 /* only return the current stats */
3516 return &adapter->net_stats;
3520 * ixgbe_set_mac - Change the Ethernet Address of the NIC
3521 * @netdev: network interface device structure
3522 * @p: pointer to an address structure
3524 * Returns 0 on success, negative on failure
3526 static int ixgbe_set_mac(struct net_device *netdev, void *p)
3528 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3529 struct sockaddr *addr = p;
3531 if (!is_valid_ether_addr(addr->sa_data))
3532 return -EADDRNOTAVAIL;
3534 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3535 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
3537 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
3539 return 0;
3542 #ifdef CONFIG_NET_POLL_CONTROLLER
3544 * Polling 'interrupt' - used by things like netconsole to send skbs
3545 * without having to re-enable interrupts. It's not called while
3546 * the interrupt routine is executing.
3548 static void ixgbe_netpoll(struct net_device *netdev)
3550 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3552 disable_irq(adapter->pdev->irq);
3553 adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
3554 ixgbe_intr(adapter->pdev->irq, netdev);
3555 adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
3556 enable_irq(adapter->pdev->irq);
3558 #endif
3561 * ixgbe_napi_add_all - prep napi structs for use
3562 * @adapter: private struct
3563 * helper function to napi_add each possible q_vector->napi
3565 static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
3567 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3568 int (*poll)(struct napi_struct *, int);
3570 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3571 poll = &ixgbe_clean_rxonly;
3572 } else {
3573 poll = &ixgbe_poll;
3574 /* only one q_vector for legacy modes */
3575 q_vectors = 1;
3578 for (i = 0; i < q_vectors; i++) {
3579 struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
3580 netif_napi_add(adapter->netdev, &q_vector->napi,
3581 (*poll), 64);
3586 * ixgbe_probe - Device Initialization Routine
3587 * @pdev: PCI device information struct
3588 * @ent: entry in ixgbe_pci_tbl
3590 * Returns 0 on success, negative on failure
3592 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
3593 * The OS initialization, configuring of the adapter private structure,
3594 * and a hardware reset occur.
3596 static int __devinit ixgbe_probe(struct pci_dev *pdev,
3597 const struct pci_device_id *ent)
3599 struct net_device *netdev;
3600 struct ixgbe_adapter *adapter = NULL;
3601 struct ixgbe_hw *hw;
3602 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
3603 unsigned long mmio_start, mmio_len;
3604 static int cards_found;
3605 int i, err, pci_using_dac;
3606 u16 link_status, link_speed, link_width;
3607 u32 part_num;
3609 err = pci_enable_device(pdev);
3610 if (err)
3611 return err;
3613 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
3614 !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
3615 pci_using_dac = 1;
3616 } else {
3617 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3618 if (err) {
3619 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3620 if (err) {
3621 dev_err(&pdev->dev, "No usable DMA "
3622 "configuration, aborting\n");
3623 goto err_dma;
3626 pci_using_dac = 0;
3629 err = pci_request_regions(pdev, ixgbe_driver_name);
3630 if (err) {
3631 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
3632 goto err_pci_reg;
3635 pci_set_master(pdev);
3636 pci_save_state(pdev);
3638 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES);
3639 if (!netdev) {
3640 err = -ENOMEM;
3641 goto err_alloc_etherdev;
3644 SET_NETDEV_DEV(netdev, &pdev->dev);
3646 pci_set_drvdata(pdev, netdev);
3647 adapter = netdev_priv(netdev);
3649 adapter->netdev = netdev;
3650 adapter->pdev = pdev;
3651 hw = &adapter->hw;
3652 hw->back = adapter;
3653 adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
3655 mmio_start = pci_resource_start(pdev, 0);
3656 mmio_len = pci_resource_len(pdev, 0);
3658 hw->hw_addr = ioremap(mmio_start, mmio_len);
3659 if (!hw->hw_addr) {
3660 err = -EIO;
3661 goto err_ioremap;
3664 for (i = 1; i <= 5; i++) {
3665 if (pci_resource_len(pdev, i) == 0)
3666 continue;
3669 netdev->open = &ixgbe_open;
3670 netdev->stop = &ixgbe_close;
3671 netdev->hard_start_xmit = &ixgbe_xmit_frame;
3672 netdev->get_stats = &ixgbe_get_stats;
3673 netdev->set_rx_mode = &ixgbe_set_rx_mode;
3674 netdev->set_multicast_list = &ixgbe_set_rx_mode;
3675 netdev->set_mac_address = &ixgbe_set_mac;
3676 netdev->change_mtu = &ixgbe_change_mtu;
3677 ixgbe_set_ethtool_ops(netdev);
3678 netdev->tx_timeout = &ixgbe_tx_timeout;
3679 netdev->watchdog_timeo = 5 * HZ;
3680 netdev->vlan_rx_register = ixgbe_vlan_rx_register;
3681 netdev->vlan_rx_add_vid = ixgbe_vlan_rx_add_vid;
3682 netdev->vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid;
3683 #ifdef CONFIG_NET_POLL_CONTROLLER
3684 netdev->poll_controller = ixgbe_netpoll;
3685 #endif
3686 strcpy(netdev->name, pci_name(pdev));
3688 netdev->mem_start = mmio_start;
3689 netdev->mem_end = mmio_start + mmio_len;
3691 adapter->bd_number = cards_found;
3693 /* PCI config space info */
3694 hw->vendor_id = pdev->vendor;
3695 hw->device_id = pdev->device;
3696 hw->revision_id = pdev->revision;
3697 hw->subsystem_vendor_id = pdev->subsystem_vendor;
3698 hw->subsystem_device_id = pdev->subsystem_device;
3700 /* Setup hw api */
3701 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3702 hw->mac.type = ii->mac;
3704 err = ii->get_invariants(hw);
3705 if (err)
3706 goto err_hw_init;
3708 /* setup the private structure */
3709 err = ixgbe_sw_init(adapter);
3710 if (err)
3711 goto err_sw_init;
3713 netdev->features = NETIF_F_SG |
3714 NETIF_F_IP_CSUM |
3715 NETIF_F_HW_VLAN_TX |
3716 NETIF_F_HW_VLAN_RX |
3717 NETIF_F_HW_VLAN_FILTER;
3719 netdev->features |= NETIF_F_IPV6_CSUM;
3720 netdev->features |= NETIF_F_TSO;
3721 netdev->features |= NETIF_F_TSO6;
3722 netdev->features |= NETIF_F_LRO;
3724 netdev->vlan_features |= NETIF_F_TSO;
3725 netdev->vlan_features |= NETIF_F_TSO6;
3726 netdev->vlan_features |= NETIF_F_IP_CSUM;
3727 netdev->vlan_features |= NETIF_F_SG;
3729 if (pci_using_dac)
3730 netdev->features |= NETIF_F_HIGHDMA;
3732 /* make sure the EEPROM is good */
3733 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
3734 dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
3735 err = -EIO;
3736 goto err_eeprom;
3739 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
3740 memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
3742 if (ixgbe_validate_mac_addr(netdev->dev_addr)) {
3743 err = -EIO;
3744 goto err_eeprom;
3747 init_timer(&adapter->watchdog_timer);
3748 adapter->watchdog_timer.function = &ixgbe_watchdog;
3749 adapter->watchdog_timer.data = (unsigned long)adapter;
3751 INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
3752 INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task);
3754 err = ixgbe_init_interrupt_scheme(adapter);
3755 if (err)
3756 goto err_sw_init;
3758 /* print bus type/speed/width info */
3759 pci_read_config_word(pdev, IXGBE_PCI_LINK_STATUS, &link_status);
3760 link_speed = link_status & IXGBE_PCI_LINK_SPEED;
3761 link_width = link_status & IXGBE_PCI_LINK_WIDTH;
3762 dev_info(&pdev->dev, "(PCI Express:%s:%s) "
3763 "%02x:%02x:%02x:%02x:%02x:%02x\n",
3764 ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" :
3765 (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" :
3766 "Unknown"),
3767 ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" :
3768 (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" :
3769 (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" :
3770 (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" :
3771 "Unknown"),
3772 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
3773 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
3774 ixgbe_read_part_num(hw, &part_num);
3775 dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
3776 hw->mac.type, hw->phy.type,
3777 (part_num >> 8), (part_num & 0xff));
3779 if (link_width <= IXGBE_PCI_LINK_WIDTH_4) {
3780 dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
3781 "this card is not sufficient for optimal "
3782 "performance.\n");
3783 dev_warn(&pdev->dev, "For optimal performance a x8 "
3784 "PCI-Express slot is required.\n");
3787 /* reset the hardware with the new settings */
3788 ixgbe_start_hw(hw);
3790 netif_carrier_off(netdev);
3791 netif_tx_stop_all_queues(netdev);
3793 ixgbe_napi_add_all(adapter);
3795 strcpy(netdev->name, "eth%d");
3796 err = register_netdev(netdev);
3797 if (err)
3798 goto err_register;
3800 #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3801 if (dca_add_requester(&pdev->dev) == 0) {
3802 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
3803 /* always use CB2 mode, difference is masked
3804 * in the CB driver */
3805 IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
3806 ixgbe_setup_dca(adapter);
3808 #endif
3810 dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
3811 cards_found++;
3812 return 0;
err_register:
	ixgbe_release_hw_control(adapter);
err_hw_init:
err_sw_init:
	ixgbe_reset_interrupt_capability(adapter);
err_eeprom:
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbe_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
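
	/* Flag the adapter as going down first, so the watchdog timer
	 * and deferred work see __IXGBE_DOWN and do not rearm
	 * themselves while we tear everything down. */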
	set_bit(__IXGBE_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);

	flush_scheduled_work();

#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
		dca_remove_requester(&pdev->dev);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
	}
#endif

	unregister_netdev(netdev);

	ixgbe_reset_interrupt_capability(adapter);

	ixgbe_release_hw_control(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	DPRINTK(PROBE, INFO, "complete\n");
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev))
		ixgbe_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 **/
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		DPRINTK(PROBE, ERR,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_restore_state(pdev);
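
	/* A zero last argument to pci_enable_wake() disarms wake events
	 * for the given power state; make sure nothing is left armed
	 * from an earlier suspend before resetting the hardware. */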
	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	ixgbe_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 **/
static void ixgbe_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (ixgbe_up(adapter)) {
			DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}
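
/*
 * PCI error recovery runs these handlers in stages: .error_detected
 * quiesces the device, .slot_reset reinitializes it once the link has
 * been reset, and .resume restarts traffic after the core declares
 * recovery complete.
 */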
static struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};

static struct pci_driver ixgbe_driver = {
	.name = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe = ixgbe_probe,
	.remove = __devexit_p(ixgbe_remove),
#ifdef CONFIG_PM
	.suspend = ixgbe_suspend,
	.resume = ixgbe_resume,
#endif
	.shutdown = ixgbe_shutdown,
	.err_handler = &ixgbe_err_handler
};

/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbe_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name,
	       ixgbe_driver_string, ixgbe_driver_version);

	printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);

#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
	dca_register_notify(&dca_notifier);
#endif

	ret = pci_register_driver(&ixgbe_driver);
	return ret;
}
module_init(ixgbe_init_module);

/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbe_exit_module(void)
{
#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&ixgbe_driver);
}

#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
			    void *p)
{
	int ret_val;
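
	/* Fan the DCA notification out to every device currently bound
	 * to this driver; __ixgbe_notify_dca handles each instance. */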
	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
					 __ixgbe_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_DCA or CONFIG_DCA_MODULE */

module_exit(ixgbe_exit_module);

/* ixgbe_main.c */