/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2007 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "ixgbe.h"
#include "ixgbe_common.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Network Driver";

#define DRV_VERSION "1.3.18-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
	"Copyright (c) 1999-2007 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598] = &ixgbe_82598_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbe_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
	 board_82598 },

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
			    void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next          = NULL,
	.priority      = 0
};
#endif

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

/**
 * ixgbe_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;
	struct net_device *netdev = adapter->netdev;
	return netdev->name;
}

static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
			   u8 msix_vector)
{
	u32 ivar, index;

	msix_vector |= IXGBE_IVAR_ALLOC_VAL;
	index = (int_alloc_entry >> 2) & 0x1F;
	ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR(index));
	ivar &= ~(0xFF << (8 * (int_alloc_entry & 0x3)));
	ivar |= (msix_vector << (8 * (int_alloc_entry & 0x3)));
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
}
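
/*
 * Worked example: each 32-bit IVAR register packs four 8-bit allocation
 * entries.  For int_alloc_entry = 5 and msix_vector = 2 the code above
 * selects IVAR(1) (5 >> 2 = 1), clears byte 1 of that register
 * (8 * (5 & 0x3) = 8), and writes (2 | IXGBE_IVAR_ALLOC_VAL) there,
 * leaving the other three entries untouched.
 */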

static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
					     struct ixgbe_tx_buffer
					     *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		pci_unmap_page(adapter->pdev,
			       tx_buffer_info->dma,
			       tx_buffer_info->length, PCI_DMA_TODEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	/* tx_buffer_info must be completely set up in the transmit path */
}

static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
				       struct ixgbe_ring *tx_ring,
				       unsigned int eop,
				       union ixgbe_adv_tx_desc *eop_desc)
{
	/* Detect a transmit hang in hardware, this serializes the
	 * check with the clearing of time_stamp and movement of i */
	adapter->detect_tx_hung = false;
	if (tx_ring->tx_buffer_info[eop].dma &&
	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
	    !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
		/* detected Tx unit hang */
		DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
			"  TDH                  <%x>\n"
			"  TDT                  <%x>\n"
			"  next_to_use          <%x>\n"
			"  next_to_clean        <%x>\n"
			"tx_buffer_info[next_to_clean]\n"
			"  time_stamp           <%lx>\n"
			"  next_to_watch        <%x>\n"
			"  jiffies              <%lx>\n"
			"  next_to_watch.status <%x>\n",
			readl(adapter->hw.hw_addr + tx_ring->head),
			readl(adapter->hw.hw_addr + tx_ring->tail),
			tx_ring->next_to_use,
			tx_ring->next_to_clean,
			tx_ring->tx_buffer_info[eop].time_stamp,
			eop, jiffies, eop_desc->wb.status);
		return true;
	}

	return false;
}

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1)	/* for context */
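
/*
 * Worked example: with IXGBE_MAX_TXD_PWR = 14 one descriptor covers at
 * most 16384 bytes, so TXD_USE_COUNT(20000) = (20000 >> 14) +
 * ((20000 & 16383) ? 1 : 0) = 1 + 1 = 2, while TXD_USE_COUNT(16384) = 1.
 * With 4K pages DESC_NEEDED is therefore 1 descriptor for skb->data,
 * one per page-sized fragment for MAX_SKB_FRAGS, plus 1 context
 * descriptor.
 */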

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
			       struct ixgbe_ring *tx_ring)
{
	struct net_device *netdev = adapter->netdev;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int i, eop;
	bool cleaned = false;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
	while (eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) {
		cleaned = false;
		while (!cleaned) {
			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			cleaned = (i == eop);

			tx_ring->stats.bytes += tx_buffer_info->length;
			if (cleaned) {
				struct sk_buff *skb = tx_buffer_info->skb;
				unsigned int segs, bytecount;
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_tx_packets += segs;
				total_tx_bytes += bytecount;
			}
			ixgbe_unmap_and_free_tx_resource(adapter,
							 tx_buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		tx_ring->stats.packets++;

		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);

		/* weight of a sort for tx, avoid endless transmit cleanup */
		if (total_tx_packets >= tx_ring->work_limit)
			break;
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (total_tx_packets && netif_carrier_ok(netdev) &&
	    (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			adapter->restart_queue++;
		}
	}

	if (adapter->detect_tx_hung)
		if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc))
			netif_stop_subqueue(netdev, tx_ring->queue_index);

	if (total_tx_packets >= tx_ring->work_limit)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value);

	tx_ring->total_bytes += total_tx_bytes;
	tx_ring->total_packets += total_tx_packets;
	adapter->net_stats.tx_bytes += total_tx_bytes;
	adapter->net_stats.tx_packets += total_tx_packets;
	cleaned = total_tx_packets ? true : false;
	return cleaned;
}

#ifdef CONFIG_DCA
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *rxr)
{
	u32 rxctrl;
	int cpu = get_cpu();
	int q = rxr - adapter->rx_ring;

	if (rxr->cpu != cpu) {
		rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
		rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
		rxctrl |= dca_get_tag(cpu);
		rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
		rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
		rxr->cpu = cpu;
	}
	put_cpu();
}

static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *txr)
{
	u32 txctrl;
	int cpu = get_cpu();
	int q = txr - adapter->tx_ring;

	if (txr->cpu != cpu) {
		txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
		txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
		txctrl |= dca_get_tag(cpu);
		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
		txr->cpu = cpu;
	}
	put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
	int i;

	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
		return;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].cpu = -1;
		ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].cpu = -1;
		ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
	}
}
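
/*
 * Note: seeding ring->cpu with -1 can never match a real CPU id, so the
 * next ixgbe_update_tx_dca()/ixgbe_update_rx_dca() call is guaranteed to
 * reprogram the DCA tag for whichever CPU the ring actually runs on.
 */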

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
		/* Always use CB2 mode, difference is masked
		 * in the CB driver. */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
		if (dca_add_requester(dev) == 0) {
			ixgbe_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
		}
		break;
	}

	return 0;
}
#endif /* CONFIG_DCA */

/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @adapter: board private structure
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
			      struct sk_buff *skb, u8 status,
			      struct ixgbe_ring *ring,
			      union ixgbe_adv_rx_desc *rx_desc)
{
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	if (adapter->netdev->features & NETIF_F_LRO &&
	    skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (adapter->vlgrp && is_vlan)
			lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb,
						     adapter->vlgrp, tag,
						     rx_desc);
		else
			lro_receive_skb(&ring->lro_mgr, skb, rx_desc);
		ring->lro_used = true;
	} else {
		if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
			if (adapter->vlgrp && is_vlan)
				vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
							 tag);
			else
				netif_receive_skb(skb);
		} else {
			if (adapter->vlgrp && is_vlan)
				vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
			else
				netif_rx(skb);
		}
	}
}

/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
				     u32 status_err, struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set, or rx csum disabled */
	if ((status_err & IXGBE_RXD_STAT_IXSM) ||
	    !(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_rx_good++;
}

/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
				   struct ixgbe_ring *rx_ring,
				   int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *rx_buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buf_len + NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

		if (!rx_buffer_info->page &&
		    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
			rx_buffer_info->page = alloc_page(GFP_ATOMIC);
			if (!rx_buffer_info->page) {
				adapter->alloc_rx_page_failed++;
				goto no_buffers;
			}
			rx_buffer_info->page_dma =
			    pci_map_page(pdev, rx_buffer_info->page,
					 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
		}

		if (!rx_buffer_info->skb) {
			skb = netdev_alloc_skb(netdev, bufsz);

			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			/*
			 * Make buffer alignment 2 beyond a 16 byte boundary
			 * this will result in a 16 byte aligned IP header after
			 * the 14 byte MAC header is removed
			 */
			skb_reserve(skb, NET_IP_ALIGN);

			rx_buffer_info->skb = skb;
			rx_buffer_info->dma = pci_map_single(pdev, skb->data,
							     bufsz,
							     PCI_DMA_FROMDEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			rx_desc->read.pkt_addr =
			    cpu_to_le64(rx_buffer_info->page_dma);
			rx_desc->read.hdr_addr =
			    cpu_to_le64(rx_buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr =
			    cpu_to_le64(rx_buffer_info->dma);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];
	}
no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i-- == 0)
			i = (rx_ring->count - 1);

		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}

static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
			       struct ixgbe_ring *rx_ring,
			       int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 upper_len, len, staterr;
	u16 hdr_info;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	upper_len = 0;
	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			hdr_info =
			    le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info);
			len =
			    ((hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			     IXGBE_RXDADV_HDRBUFLEN_SHIFT);
			if (hdr_info & IXGBE_RXDADV_SPH)
				adapter->rx_hdr_split++;
			if (len > IXGBE_RX_HDR_SIZE)
				len = IXGBE_RX_HDR_SIZE;
			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
		} else {
			len = le16_to_cpu(rx_desc->wb.upper.length);
		}

		cleaned = true;
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (len && !skb_shinfo(skb)->nr_frags) {
			pci_unmap_single(pdev, rx_buffer_info->dma,
					 adapter->rx_buf_len + NET_IP_ALIGN,
					 PCI_DMA_FROMDEVICE);
			skb_put(skb, len);
		}

		if (upper_len) {
			pci_unmap_page(pdev, rx_buffer_info->page_dma,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
			rx_buffer_info->page_dma = 0;
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buffer_info->page, 0, upper_len);
			rx_buffer_info->page = NULL;

			skb->len += upper_len;
			skb->data_len += upper_len;
			skb->truesize += upper_len;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_buffer = &rx_ring->rx_buffer_info[i];

		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);

		cleaned_count++;
		if (staterr & IXGBE_RXD_STAT_EOP) {
			rx_ring->stats.packets++;
			rx_ring->stats.bytes += skb->len;
		} else {
			rx_buffer_info->skb = next_buffer->skb;
			rx_buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			adapter->non_eop_descs++;
			goto next_desc;
		}

		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbe_rx_checksum(adapter, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, netdev);
		ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc);
		netdev->last_rx = jiffies;

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	if (rx_ring->lro_used) {
		lro_flush_all(&rx_ring->lro_mgr);
		rx_ring->lro_used = false;
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;

	return cleaned;
}

static int ixgbe_clean_rxonly(struct napi_struct *, int);

/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
	struct ixgbe_q_vector *q_vector;
	int i, j, q_vectors, v_idx, r_idx;
	u32 mask;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = &adapter->q_vector[v_idx];
		/* XXX for_each_bit(...) */
		r_idx = find_first_bit(q_vector->rxr_idx,
				       adapter->num_rx_queues);

		for (i = 0; i < q_vector->rxr_count; i++) {
			j = adapter->rx_ring[r_idx].reg_idx;
			ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx);
			r_idx = find_next_bit(q_vector->rxr_idx,
					      adapter->num_rx_queues,
					      r_idx + 1);
		}
		r_idx = find_first_bit(q_vector->txr_idx,
				       adapter->num_tx_queues);

		for (i = 0; i < q_vector->txr_count; i++) {
			j = adapter->tx_ring[r_idx].reg_idx;
			ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx);
			r_idx = find_next_bit(q_vector->txr_idx,
					      adapter->num_tx_queues,
					      r_idx + 1);
		}

		/* if this is a tx only vector use half the irq (tx) rate */
		if (q_vector->txr_count && !q_vector->rxr_count)
			q_vector->eitr = adapter->tx_eitr;
		else
			/* rx only or mixed */
			q_vector->eitr = adapter->rx_eitr;

		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx),
				EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
	}

	ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);

	/* set up to autoclear timer, lsc, and the vectors */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~IXGBE_EIMS_OTHER;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @eitr: eitr setting (ints per sec) to give last timeslice
 * @itr_setting: current throttle rate in ints/second
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 *      This functionality is controlled by the InterruptThrottleRate module
 *      parameter (see ixgbe_param.c)
 **/
static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
			   u32 eitr, u8 itr_setting,
			   int packets, int bytes)
{
	unsigned int retval = itr_setting;
	u32 timepassed_us;
	u64 bytes_perint;

	if (packets == 0)
		goto update_itr_done;

	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = 1000000/eitr;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > adapter->eitr_low)
			retval = low_latency;
		break;
	case low_latency:
		if (bytes_perint > adapter->eitr_high)
			retval = bulk_latency;
		else if (bytes_perint <= adapter->eitr_low)
			retval = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= adapter->eitr_high)
			retval = low_latency;
		break;
	}

update_itr_done:
	return retval;
}
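
/*
 * Illustration (assuming, say, eitr_low = 10 and eitr_high = 20
 * bytes/usec; the real thresholds live in the adapter structure): at
 * eitr = 20000 ints/s a timeslice is 1000000/20000 = 50 us, so 1500
 * bytes in one slice gives bytes_perint = 30 and low_latency steps up
 * to bulk_latency, while 300 bytes gives 6 and steps back down to
 * lowest_latency.
 */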

static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 new_itr;
	u8 current_itr, ret_itr;
	int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) /
			      sizeof(struct ixgbe_q_vector);
	struct ixgbe_ring *rx_ring, *tx_ring;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
					   q_vector->tx_eitr,
					   tx_ring->total_packets,
					   tx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->tx_eitr = ((q_vector->tx_eitr > ret_itr) ?
				    q_vector->tx_eitr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
					   q_vector->rx_eitr,
					   rx_ring->total_packets,
					   rx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->rx_eitr = ((q_vector->rx_eitr > ret_itr) ?
				    q_vector->rx_eitr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

	current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
	default:
		new_itr = 8000;
		break;
	}

	if (new_itr != q_vector->eitr) {
		u32 itr_reg;
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
		q_vector->eitr = new_itr;
		itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
		/* must write high and low 16 bits to reset counter */
		DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx,
			itr_reg);
		IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg | (itr_reg)<<16);
	}

	return;
}
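
/*
 * Smoothing example: a vector sitting at eitr = 8000 ints/s whose table
 * lookup asks for 100000 is moved to (8000 * 90)/100 + (100000 * 10)/100
 * = 17200 ints/s, so the rate ramps toward the target over several
 * intervals rather than jumping there in one step.
 */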

static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	if (eicr & IXGBE_EICR_LSC) {
		adapter->lsc_int++;
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies);
	}

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);

	return IRQ_HANDLED;
}

static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter  *adapter = q_vector->adapter;
	struct ixgbe_ring     *txr;
	int i, r_idx;

	if (!q_vector->txr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		txr = &(adapter->tx_ring[r_idx]);
#ifdef CONFIG_DCA
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			ixgbe_update_tx_dca(adapter, txr);
#endif
		txr->total_bytes = 0;
		txr->total_packets = 0;
		ixgbe_clean_tx_irq(adapter, txr);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	return IRQ_HANDLED;
}

/**
 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter  *adapter = q_vector->adapter;
	struct ixgbe_ring  *rxr;
	int r_idx;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	if (!q_vector->rxr_count)
		return IRQ_HANDLED;

	rxr = &(adapter->rx_ring[r_idx]);
	/* disable interrupts on this vector only */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rxr->v_idx);
	rxr->total_bytes = 0;
	rxr->total_packets = 0;
	netif_rx_schedule(adapter->netdev, &q_vector->napi);

	return IRQ_HANDLED;
}

static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
{
	ixgbe_msix_clean_rx(irq, data);
	ixgbe_msix_clean_tx(irq, data);

	return IRQ_HANDLED;
}

/**
 * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 **/
static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
			       container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *rxr;
	int work_done = 0;
	long r_idx;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rxr = &(adapter->rx_ring[r_idx]);
#ifdef CONFIG_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		ixgbe_update_rx_dca(adapter, rxr);
#endif

	ixgbe_clean_rx_irq(adapter, rxr, &work_done, budget);

	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		netif_rx_complete(adapter->netdev, napi);
		if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
			ixgbe_set_itr_msix(q_vector);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->v_idx);
	}

	return work_done;
}

static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
				     int r_idx)
{
	a->q_vector[v_idx].adapter = a;
	set_bit(r_idx, a->q_vector[v_idx].rxr_idx);
	a->q_vector[v_idx].rxr_count++;
	a->rx_ring[r_idx].v_idx = 1 << v_idx;
}

static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
				     int r_idx)
{
	a->q_vector[v_idx].adapter = a;
	set_bit(r_idx, a->q_vector[v_idx].txr_idx);
	a->q_vector[v_idx].txr_count++;
	a->tx_ring[r_idx].v_idx = 1 << v_idx;
}
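
/*
 * Example: after map_vector_to_rxq(adapter, 2, 5), bit 5 is set in
 * q_vector[2].rxr_idx and rx_ring[5].v_idx == 1 << 2 == 0x4, the same
 * single-vector bit later written to EIMC/EIMS to mask or unmask just
 * that vector.
 */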

/**
 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 * @vectors: allotted vector count for descriptor rings
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
				      int vectors)
{
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	/* No mapping required if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		goto out;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);

		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}
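
/*
 * Example distribution: with 6 rx queues and 4 vectors the grouping loop
 * computes DIV_ROUND_UP(6,4)=2, DIV_ROUND_UP(4,3)=2, DIV_ROUND_UP(2,2)=1
 * and DIV_ROUND_UP(1,1)=1, so vectors 0..3 take 2, 2, 1 and 1 queues;
 * re-computing the quotient each pass absorbs the remainder evenly.
 */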

/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irqreturn_t (*handler)(int, void *);
	int i, vector, q_vectors, err;

	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* Map the Tx/Rx rings to the vectors we were allotted. */
	err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
	if (err)
		goto out;

#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
			 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
			 &ixgbe_msix_clean_many)
	for (vector = 0; vector < q_vectors; vector++) {
		handler = SET_HANDLER(&adapter->q_vector[vector]);
		sprintf(adapter->name[vector], "%s:v%d-%s",
			netdev->name, vector,
			(handler == &ixgbe_msix_clean_rx) ? "Rx" :
			 ((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx"));
		err = request_irq(adapter->msix_entries[vector].vector,
				  handler, 0, adapter->name[vector],
				  &(adapter->q_vector[vector]));
		if (err) {
			DPRINTK(PROBE, ERR,
				"request_irq failed for MSIX interrupt "
				"Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	sprintf(adapter->name[vector], "%s:lsc", netdev->name);
	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
	if (err) {
		DPRINTK(PROBE, ERR,
			"request_irq for msix_lsc failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	for (i = vector - 1; i >= 0; i--)
		free_irq(adapter->msix_entries[--vector].vector,
			 &(adapter->q_vector[i]));
	adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
out:
	return err;
}

static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_q_vector *q_vector = adapter->q_vector;
	u8 current_itr;
	u32 new_itr = q_vector->eitr;
	struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
	struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];

	q_vector->tx_eitr = ixgbe_update_itr(adapter, new_itr,
					     q_vector->tx_eitr,
					     tx_ring->total_packets,
					     tx_ring->total_bytes);
	q_vector->rx_eitr = ixgbe_update_itr(adapter, new_itr,
					     q_vector->rx_eitr,
					     rx_ring->total_packets,
					     rx_ring->total_bytes);

	current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 8000;
		break;
	default:
		break;
	}

	if (new_itr != q_vector->eitr) {
		u32 itr_reg;
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
		q_vector->eitr = new_itr;
		itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
		/* must write high and low 16 bits to reset counter */
		IXGBE_WRITE_REG(hw, IXGBE_EITR(0), itr_reg | (itr_reg)<<16);
	}

	return;
}

static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter);

/**
 * ixgbe_intr - legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 * @pt_regs: CPU registers structure
 **/
static irqreturn_t ixgbe_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr;

	/* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
	 * therefore no explicit interrupt disable is necessary */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
	if (!eicr)
		return IRQ_NONE;	/* Not our interrupt */

	if (eicr & IXGBE_EICR_LSC) {
		adapter->lsc_int++;
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies);
	}

	if (netif_rx_schedule_prep(netdev, &adapter->q_vector[0].napi)) {
		adapter->tx_ring[0].total_packets = 0;
		adapter->tx_ring[0].total_bytes = 0;
		adapter->rx_ring[0].total_packets = 0;
		adapter->rx_ring[0].total_bytes = 0;
		/* would disable interrupts here but EIAM disabled it */
		__netif_rx_schedule(netdev, &adapter->q_vector[0].napi);
	}

	return IRQ_HANDLED;
}

static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
		bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
		bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
		q_vector->rxr_count = 0;
		q_vector->txr_count = 0;
	}
}

/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		err = ixgbe_request_msix_irqs(adapter);
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
				  netdev->name, netdev);
	} else {
		err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
				  netdev->name, netdev);
	}

	if (err)
		DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);

	return err;
}

static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int i, q_vectors;

		q_vectors = adapter->num_msix_vectors;

		i = q_vectors - 1;
		free_irq(adapter->msix_entries[i].vector, netdev);

		i--;
		for (; i >= 0; i--) {
			free_irq(adapter->msix_entries[i].vector,
				 &(adapter->q_vector[i]));
		}

		ixgbe_reset_q_vectors(adapter);
	} else {
		free_irq(adapter->pdev->irq, netdev);
	}
}

/**
 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int i;
		for (i = 0; i < adapter->num_msix_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

/**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
{
	u32 mask;
	mask = IXGBE_EIMS_ENABLE_MASK;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
	IXGBE_WRITE_FLUSH(&adapter->hw);
}

/**
 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
 *
 **/
static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
			EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr));

	ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0);
	ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(0), 0);

	map_vector_to_rxq(adapter, 0, 0);
	map_vector_to_txq(adapter, 0, 0);

	DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n");
}

/**
 * ixgbe_configure_tx - Configure 82598 Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
{
	u64 tdba;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, j, tdlen, txctrl;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		tdba = adapter->tx_ring[i].dma;
		tdlen = adapter->tx_ring[i].count *
			sizeof(union ixgbe_adv_tx_desc);
		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
				(tdba & DMA_32BIT_MASK));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
		adapter->tx_ring[i].head = IXGBE_TDH(j);
		adapter->tx_ring[i].tail = IXGBE_TDT(j);
		/* Disable Tx Head Writeback RO bit, since this hoses
		 * bookkeeping if things aren't delivered in order.
		 */
		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
	}
}

#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
			(((S) & (PAGE_SIZE - 1)) ? 1 : 0))
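
/*
 * Worked example: with 4K pages PAGE_USE_COUNT(9000) = (9000 >> 12) +
 * ((9000 & 4095) ? 1 : 0) = 2 + 1 = 3 pages for a 9000 byte jumbo MTU.
 */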

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

/**
 * ixgbe_get_skb_hdr - helper function for LRO header processing
 * @skb: pointer to sk_buff to be added to LRO packet
 * @iphdr: pointer to ip header structure
 * @tcph: pointer to tcp header structure
 * @hdr_flags: pointer to header flags
 * @priv: private data
 **/
static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
			     u64 *hdr_flags, void *priv)
{
	union ixgbe_adv_rx_desc *rx_desc = priv;

	/* Verify that this is a valid IPv4 TCP packet */
	if (!(rx_desc->wb.lower.lo_dword.pkt_info &
	    (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)))
		return -1;

	/* Set network headers */
	skb_reset_network_header(skb);
	skb_set_transport_header(skb, ip_hdrlen(skb));
	*iphdr = ip_hdr(skb);
	*tcph = tcp_hdr(skb);
	*hdr_flags = LRO_IPV4 | LRO_TCP;
	return 0;
}

/**
 * ixgbe_configure_rx - Configure 82598 Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
{
	u64 rdba;
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i, j;
	u32 rdlen, rxctrl, rxcsum;
	u32 random[10];
	u32 fctrl, hlreg0;
	u32 pages;
	u32 reta = 0, mrqc, srrctl;

	/* Decide whether to use packet split mode or not */
	if (netdev->mtu > ETH_DATA_LEN)
		adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
	else
		adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;

	/* Set the RX buffer length according to the mode */
	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		adapter->rx_buf_len = IXGBE_RX_HDR_SIZE;
	} else {
		if (netdev->mtu <= ETH_DATA_LEN)
			adapter->rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		else
			adapter->rx_buf_len = ALIGN(max_frame, 1024);
	}

	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
	else
		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);

	pages = PAGE_USE_COUNT(adapter->netdev->mtu);

	srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(0));
	srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
	srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;

	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
		srrctl |= ((IXGBE_RX_HDR_SIZE <<
			    IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
			   IXGBE_SRRCTL_BSIZEHDR_MASK);
	} else {
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		if (adapter->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
			srrctl |=
			     IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		else
			srrctl |=
			     adapter->rx_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	}
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(0), srrctl);

	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
	/* disable receives while setting up the descriptors */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rdba = adapter->rx_ring[i].dma;
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i), (rdba & DMA_32BIT_MASK));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i), rdlen);
		IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
		adapter->rx_ring[i].head = IXGBE_RDH(i);
		adapter->rx_ring[i].tail = IXGBE_RDT(i);

		/* Initial LRO Settings */
		adapter->rx_ring[i].lro_mgr.max_aggr = IXGBE_MAX_LRO_AGGREGATE;
		adapter->rx_ring[i].lro_mgr.max_desc = IXGBE_MAX_LRO_DESCRIPTORS;
		adapter->rx_ring[i].lro_mgr.get_skb_header = ixgbe_get_skb_hdr;
		adapter->rx_ring[i].lro_mgr.features = LRO_F_EXTRACT_VLAN_ID;
		if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
			adapter->rx_ring[i].lro_mgr.features |= LRO_F_NAPI;
		adapter->rx_ring[i].lro_mgr.dev = adapter->netdev;
		adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
		adapter->rx_ring[i].lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
	}

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
		/* Fill out redirection table */
		for (i = 0, j = 0; i < 128; i++, j++) {
			if (j == adapter->ring_feature[RING_F_RSS].indices)
				j = 0;
			/* reta = 4-byte sliding window of
			 * 0x00..(indices-1)(indices-1)00..etc. */
			reta = (reta << 8) | (j * 0x11);
			if ((i & 3) == 3)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
		}
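
		/*
		 * Worked example: with 4 RSS indices j cycles 0,1,2,3 and
		 * j * 0x11 yields 0x00, 0x11, 0x22, 0x33, so every fourth
		 * pass writes the accumulated value 0x00112233 into the
		 * next RETA register.
		 */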

		/* Fill out hash function seeds */
		/* XXX use a random constant here to glue certain flows */
		get_random_bytes(&random[0], 40);
		for (i = 0; i < 10; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]);

		mrqc = IXGBE_MRQC_RSSEN
		    /* Perform hash on these packet types */
		    | IXGBE_MRQC_RSS_FIELD_IPV4
		    | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
		    | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX
		    | IXGBE_MRQC_RSS_FIELD_IPV6
		    | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
		    | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
	    adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
		/* Disable indicating checksum in descriptor, enables
		 * RSS hash */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}
	if (!(rxcsum & IXGBE_RXCSUM_PCSD)) {
		/* Enable IPv4 payload checksum for UDP fragments
		 * if PCSD is not set */
		rxcsum |= IXGBE_RXCSUM_IPPCSE;
	}

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
}

static void ixgbe_vlan_rx_register(struct net_device *netdev,
				   struct vlan_group *grp)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 ctrl;

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
		ctrl |= IXGBE_VLNCTRL_VME;
		if (!(netdev->flags & IFF_PROMISC))
			ctrl |= IXGBE_VLNCTRL_VFE;
		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
	}

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_enable(adapter);
}

static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* add VID to filter table */
	ixgbe_set_vfta(&adapter->hw, vid, 0, true);
}
*netdev
, u16 vid
)
1598 struct ixgbe_adapter
*adapter
= netdev_priv(netdev
);
1600 if (!test_bit(__IXGBE_DOWN
, &adapter
->state
))
1601 ixgbe_irq_disable(adapter
);
1603 vlan_group_set_device(adapter
->vlgrp
, vid
, NULL
);
1605 if (!test_bit(__IXGBE_DOWN
, &adapter
->state
))
1606 ixgbe_irq_enable(adapter
);
1608 /* remove VID from filter table */
1609 ixgbe_set_vfta(&adapter
->hw
, vid
, 0, false);

static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
{
	ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}

/**
 * ixgbe_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void ixgbe_set_multi(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct dev_mc_list *mc_ptr;
	u8 *mta_list;
	u32 fctrl;
	int i;

	/* Check for Promiscuous and All Multicast modes */

	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);

	if (netdev->flags & IFF_PROMISC) {
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		fctrl &= ~IXGBE_VLNCTRL_VFE;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			fctrl |= IXGBE_FCTRL_MPE;
			fctrl &= ~IXGBE_FCTRL_UPE;
		} else {
			fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		}
		fctrl |= IXGBE_VLNCTRL_VFE;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	if (netdev->mc_count) {
		mta_list = kcalloc(netdev->mc_count, ETH_ALEN, GFP_ATOMIC);
		if (!mta_list)
			return;

		/* Shared function expects packed array of only addresses. */
		mc_ptr = netdev->mc_list;

		for (i = 0; i < netdev->mc_count; i++) {
			if (!mc_ptr)
				break;
			memcpy(mta_list + (i * ETH_ALEN), mc_ptr->dmi_addr,
			       ETH_ALEN);
			mc_ptr = mc_ptr->next;
		}

		ixgbe_update_mc_addr_list(hw, mta_list, i, 0);
		kfree(mta_list);
	} else {
		ixgbe_update_mc_addr_list(hw, NULL, 0, 0);
	}
}

static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
{
	int q_idx;
	struct ixgbe_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* legacy and MSI only use one vector */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = &adapter->q_vector[q_idx];
		if (!q_vector->rxr_count)
			continue;
		napi_enable(&q_vector->napi);
	}
}

static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
{
	int q_idx;
	struct ixgbe_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* legacy and MSI only use one vector */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = &adapter->q_vector[q_idx];
		if (!q_vector->rxr_count)
			continue;
		napi_disable(&q_vector->napi);
	}
}

static void ixgbe_configure(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	ixgbe_set_multi(netdev);

	ixgbe_restore_vlan(adapter);

	ixgbe_configure_tx(adapter);
	ixgbe_configure_rx(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
				       (adapter->rx_ring[i].count - 1));
}

static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j = 0;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	u32 txdctl, rxdctl, mhadd;
	u32 gpie;

	ixgbe_get_hw_control(adapter);

	if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ||
	    (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
			gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
				IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
		} else {
			/* MSI only */
			gpie = 0;
		}
		/* XXX: to interrupt immediately for EICS writes, enable this */
		/* gpie |= IXGBE_GPIE_EIMEN; */
		IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
	}

	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/* legacy interrupts, use EIAM to auto-mask when reading EICR,
		 * specifically only auto mask tx and rx interrupts */
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;

		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		j = adapter->rx_ring[i].reg_idx;
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
		/* enable PTHRESH=32 descriptors (half the internal cache)
		 * and HTHRESH=0 descriptors (to minimize latency on fetch),
		 * this also removes a pesky rx_no_buffer_count increment */
		rxdctl |= 0x0020;
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl);
	}
	/* enable all receives */
	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxdctl);

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		ixgbe_configure_msix(adapter);
	else
		ixgbe_configure_msi_and_legacy(adapter);

	clear_bit(__IXGBE_DOWN, &adapter->state);
	ixgbe_napi_enable_all(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_EICR);

	ixgbe_irq_enable(adapter);

	/* bring the link up in the watchdog, this could race with our first
	 * link up interrupt but shouldn't be a problem */
	mod_timer(&adapter->watchdog_timer, jiffies);
	return 0;
}

void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		msleep(1);
	ixgbe_down(adapter);
	ixgbe_up(adapter);
	clear_bit(__IXGBE_RESETTING, &adapter->state);
}

int ixgbe_up(struct ixgbe_adapter *adapter)
{
	/* hardware has been reset, we need to reload some things */
	ixgbe_configure(adapter);

	return ixgbe_up_complete(adapter);
}

void ixgbe_reset(struct ixgbe_adapter *adapter)
{
	if (ixgbe_init_hw(&adapter->hw))
		DPRINTK(PROBE, ERR, "Hardware Error\n");

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
}

static int ixgbe_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "ixgbe: Cannot enable PCI device from " \
		       "suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(netdev)) {
		err = ixgbe_request_irq(adapter);
		if (err)
			return err;
	}

	ixgbe_reset(adapter);

	if (netif_running(netdev))
		ixgbe_up(adapter);

	netif_device_attach(netdev);

	return 0;
}

/**
 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */

	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbe_rx_buffer *rx_buffer_info;

		rx_buffer_info = &rx_ring->rx_buffer_info[i];
		if (rx_buffer_info->dma) {
			pci_unmap_single(pdev, rx_buffer_info->dma,
					 adapter->rx_buf_len,
					 PCI_DMA_FROMDEVICE);
			rx_buffer_info->dma = 0;
		}
		if (rx_buffer_info->skb) {
			dev_kfree_skb(rx_buffer_info->skb);
			rx_buffer_info->skb = NULL;
		}
		if (!rx_buffer_info->page)
			continue;
		pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
		rx_buffer_info->page_dma = 0;

		put_page(rx_buffer_info->page);
		rx_buffer_info->page = NULL;
	}

	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

/**
 * ixgbe_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *tx_ring)
{
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
	}

	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->head);
	writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}

/**
 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}

void ixgbe_down(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 rxctrl;

	/* signal that we are down to the interrupt handler */
	set_bit(__IXGBE_DOWN, &adapter->state);

	/* disable receives */
	rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL,
			rxctrl & ~IXGBE_RXCTRL_RXEN);

	netif_tx_disable(netdev);

	/* disable transmits in the hardware */

	/* flush both disables */
	IXGBE_WRITE_FLUSH(&adapter->hw);
	msleep(10);

	ixgbe_irq_disable(adapter);

	ixgbe_napi_disable_all(adapter);
	del_timer_sync(&adapter->watchdog_timer);

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	if (!pci_channel_offline(adapter->pdev))
		ixgbe_reset(adapter);
	ixgbe_clean_all_tx_rings(adapter);
	ixgbe_clean_all_rx_rings(adapter);
}

static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int retval = 0;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		ixgbe_down(adapter);
		ixgbe_free_irq(adapter);
	}

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	ixgbe_release_hw_control(adapter);

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
2061 static void ixgbe_shutdown(struct pci_dev
*pdev
)
2063 ixgbe_suspend(pdev
, PMSG_SUSPEND
);
/**
 * ixgbe_poll - NAPI Rx polling callback
 * @napi: structure for representing this polling device
 * @budget: how many packets driver is allowed to clean
 *
 * This function is used for legacy and MSI, NAPI mode
 **/
static int ixgbe_poll(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector = container_of(napi,
					struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	int tx_cleaned = 0, work_done = 0;

#ifdef CONFIG_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		ixgbe_update_tx_dca(adapter, adapter->tx_ring);
		ixgbe_update_rx_dca(adapter, adapter->rx_ring);
	}
#endif

	tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
	ixgbe_clean_rx_irq(adapter, adapter->rx_ring, &work_done, budget);

	if (tx_cleaned)
		work_done = budget;

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		netif_rx_complete(adapter->netdev, napi);
		if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
			ixgbe_set_itr(adapter);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable(adapter);
	}

	return work_done;
}
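/*
 * ixgbe_poll() follows the NAPI contract of this kernel generation:
 * clean at most 'budget' Rx packets, return the amount of work done,
 * and only call netif_rx_complete() + re-enable interrupts when less
 * than the full budget was consumed.  Reporting work_done == budget
 * (as done above whenever Tx work remains) keeps the device on the
 * poll list, so net_rx_action() calls back in without waiting for a
 * fresh interrupt.
 */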
/**
 * ixgbe_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbe_tx_timeout(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->reset_task);
}

static void ixgbe_reset_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter;
	adapter = container_of(work, struct ixgbe_adapter, reset_task);

	adapter->tx_timeout_count++;

	ixgbe_reinit_locked(adapter);
}
static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
				       int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 * 3) Other (Link Status Change, etc.)
	 * 4) TCP Timer (optional)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				      vectors);
		if (!err) /* Success in acquiring all requested vectors. */
			break;
		else if (err < 0)
			vectors = 0; /* Nasty failure, quit now */
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold) {
		/* Can't allocate enough MSI-X interrupts?  Oh well.
		 * This just means we'll go with either a single MSI
		 * vector or fall back to legacy interrupts.
		 */
		DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n");
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
		adapter->num_tx_queues = 1;
		adapter->num_rx_queues = 1;
	} else {
		adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
		adapter->num_msix_vectors = vectors;
	}
}
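/*
 * The negotiation loop above relies on the return convention of
 * pci_enable_msix() in this era: 0 on success, a negative errno on
 * hard failure, and a positive value meaning "only this many vectors
 * are available", in which case the request is simply retried with
 * the smaller count.  Only when the count falls below MIN_MSIX_COUNT
 * does the driver give up on MSI-X and fall back to MSI or legacy
 * interrupts with a single Rx/Tx queue pair.
 */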
static void __devinit ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	int nrq = 1, ntq = 1;
	int feature_mask = 0, rss_i, rss_m;

	/* Number of supported queues */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		rss_i = adapter->ring_feature[RING_F_RSS].indices;
		rss_m = 0;
		feature_mask |= IXGBE_FLAG_RSS_ENABLED;

		switch (adapter->flags & feature_mask) {
		case (IXGBE_FLAG_RSS_ENABLED):
			rss_m = 0xF;
			nrq = rss_i;
			ntq = rss_i;
			break;
		case 0:
		default:
			rss_i = 0;
			rss_m = 0;
			nrq = 1;
			ntq = 1;
			break;
		}

		adapter->ring_feature[RING_F_RSS].indices = rss_i;
		adapter->ring_feature[RING_F_RSS].mask = rss_m;
		break;
	default:
		nrq = 1;
		ntq = 1;
		break;
	}

	adapter->num_rx_queues = nrq;
	adapter->num_tx_queues = ntq;
}
/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* TODO: Remove all uses of the indices in the cases where multiple
	 * features are OR'd together, if the feature set makes sense.
	 */
	int feature_mask = 0, rss_i;
	int i, txr_idx, rxr_idx;

	/* Number of supported queues */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		rss_i = adapter->ring_feature[RING_F_RSS].indices;
		txr_idx = 0;
		rxr_idx = 0;
		feature_mask |= IXGBE_FLAG_RSS_ENABLED;
		switch (adapter->flags & feature_mask) {
		case (IXGBE_FLAG_RSS_ENABLED):
			for (i = 0; i < adapter->num_rx_queues; i++)
				adapter->rx_ring[i].reg_idx = i;
			for (i = 0; i < adapter->num_tx_queues; i++)
				adapter->tx_ring[i].reg_idx = i;
			break;
		case 0:
		default:
			break;
		}
		break;
	default:
		break;
	}
}
/**
 * ixgbe_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct ixgbe_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err_tx_ring_allocation;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct ixgbe_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err_rx_ring_allocation;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].count = IXGBE_DEFAULT_TXD;
		adapter->tx_ring[i].queue_index = i;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].count = IXGBE_DEFAULT_RXD;
		adapter->rx_ring[i].queue_index = i;
	}

	ixgbe_cache_ring_register(adapter);

	return 0;

err_rx_ring_allocation:
	kfree(adapter->tx_ring);
err_tx_ring_allocation:
	return -ENOMEM;
}
/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
						    *adapter)
{
	int err = 0;
	int vector, v_budget;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) twice the number of vectors as there are CPU's.
	 */
	v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
		       (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;

	/*
	 * At the same time, hardware can only support a maximum of
	 * MAX_MSIX_COUNT vectors.  With features such as RSS and VMDq,
	 * we can easily reach upwards of 64 Rx descriptor queues and
	 * 32 Tx queues.  Thus, we cap it off in those rare cases where
	 * the cpu count also exceeds our vector limit.
	 */
	v_budget = min(v_budget, MAX_MSIX_COUNT);

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
		ixgbe_set_num_queues(adapter);
		kfree(adapter->tx_ring);
		kfree(adapter->rx_ring);
		err = ixgbe_alloc_queues(adapter);
		if (err) {
			DPRINTK(PROBE, ERR, "Unable to allocate memory "
				"for queues\n");
			goto out;
		}

		goto try_msi;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	ixgbe_acquire_msix_vectors(adapter, v_budget);

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		goto out;

try_msi:
	err = pci_enable_msi(adapter->pdev);
	if (!err) {
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
	} else {
		DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
			"falling back to legacy.  Error: %d\n", err);
		/* reset err */
		err = 0;
	}

out:
	/* Notify the stack of the (possibly) reduced Tx Queue count. */
	adapter->netdev->egress_subqueue_count = adapter->num_tx_queues;

	return err;
}
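/*
 * Interrupt setup is a best-effort ladder: MSI-X sized by v_budget
 * first, then a single MSI, then the legacy INTx line; exactly one of
 * the mode flags is left set on exit.  As a worked example of the
 * budget: with 8 online CPUs and 8 Rx + 8 Tx queues configured,
 * v_budget = min(8 + 8, 16) + NON_Q_VECTORS, subsequently capped at
 * MAX_MSIX_COUNT.
 */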
static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}
/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
static int __devinit ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbe_set_num_queues(adapter);

	err = ixgbe_alloc_queues(adapter);
	if (err) {
		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = ixgbe_set_interrupt_capability(adapter);
	if (err) {
		DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
		"Tx Queue count = %u\n",
		(adapter->num_rx_queues > 1) ? "Enabled" :
		"Disabled", adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_set_interrupt:
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_alloc_queues:
	return err;
}
/**
 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbe_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int rss;

	/* Set capability flags */
	rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
	adapter->ring_feature[RING_F_RSS].indices = rss;
	adapter->flags |= IXGBE_FLAG_RSS_ENABLED;

	/* Enable Dynamic interrupt throttling by default */
	adapter->rx_eitr = 1;
	adapter->tx_eitr = 1;

	/* default flow control settings */
	hw->fc.original_type = ixgbe_fc_full;
	hw->fc.type = ixgbe_fc_full;

	/* select 10G link by default */
	hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
	if (hw->mac.ops.reset(hw)) {
		dev_err(&pdev->dev, "HW Init failed\n");
		return -EIO;
	}
	if (hw->mac.ops.setup_link_speed(hw, IXGBE_LINK_SPEED_10GB_FULL, true,
					 false)) {
		dev_err(&pdev->dev, "Link Speed setup failed\n");
		return -EIO;
	}

	/* initialize eeprom parameters */
	if (ixgbe_init_eeprom(hw)) {
		dev_err(&pdev->dev, "EEPROM initialization failed\n");
		return -EIO;
	}

	/* enable rx csum by default */
	adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;
}
/**
 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
			     struct ixgbe_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbe_tx_buffer) * txdr->count;
	txdr->tx_buffer_info = vmalloc(size);
	if (!txdr->tx_buffer_info) {
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the transmit descriptor ring\n");
		return -ENOMEM;
	}
	memset(txdr->tx_buffer_info, 0, size);

	/* round up to nearest 4K */
	txdr->size = txdr->count * sizeof(union ixgbe_adv_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
	if (!txdr->desc) {
		vfree(txdr->tx_buffer_info);
		DPRINTK(PROBE, ERR,
			"Memory allocation failed for the tx desc ring\n");
		return -ENOMEM;
	}

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;
	txdr->work_limit = txdr->count;

	return 0;
}
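/*
 * Two allocators are used above deliberately: the per-buffer
 * bookkeeping array is only touched by the CPU, so virtually
 * contiguous vmalloc() memory suffices, while the descriptor ring is
 * fetched by the NIC via DMA and therefore comes from
 * pci_alloc_consistent().  The ring size is rounded up to a 4K
 * boundary, which comfortably satisfies the device's descriptor-ring
 * alignment requirements.
 */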
/**
 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
			     struct ixgbe_ring *rxdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct net_lro_desc) * IXGBE_MAX_LRO_DESCRIPTORS;
	rxdr->lro_mgr.lro_arr = vmalloc(size);
	if (!rxdr->lro_mgr.lro_arr)
		return -ENOMEM;
	memset(rxdr->lro_mgr.lro_arr, 0, size);

	size = sizeof(struct ixgbe_rx_buffer) * rxdr->count;
	rxdr->rx_buffer_info = vmalloc(size);
	if (!rxdr->rx_buffer_info) {
		DPRINTK(PROBE, ERR,
			"vmalloc allocation failed for the rx desc ring\n");
		goto alloc_failed;
	}
	memset(rxdr->rx_buffer_info, 0, size);

	/* Round up to nearest 4K */
	rxdr->size = rxdr->count * sizeof(union ixgbe_adv_rx_desc);
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);

	if (!rxdr->desc) {
		DPRINTK(PROBE, ERR,
			"Memory allocation failed for the rx desc ring\n");
		vfree(rxdr->rx_buffer_info);
		goto alloc_failed;
	}

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;

	return 0;

alloc_failed:
	vfree(rxdr->lro_mgr.lro_arr);
	rxdr->lro_mgr.lro_arr = NULL;
	return -ENOMEM;
}
/**
 * ixgbe_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
static void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
				    struct ixgbe_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbe_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
}
/**
 * ixgbe_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
static void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
				    struct ixgbe_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	vfree(rx_ring->lro_mgr.lro_arr);
	rx_ring->lro_mgr.lro_arr = NULL;

	ixgbe_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
}
/**
 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (!err)
			continue;
		DPRINTK(PROBE, ERR,
			"Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (!err)
			continue;
		DPRINTK(PROBE, ERR,
			"Allocation for Rx Queue %u failed\n", i);
		break;
	}

	return err;
}
/**
 * ixgbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	if ((max_frame < (ETH_ZLEN + ETH_FCS_LEN)) ||
	    (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
		return -EINVAL;

	DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
		netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}
/**
 * ixgbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbe_open(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int err;

	/* disallow open during test */
	if (test_bit(__IXGBE_TESTING, &adapter->state))
		return -EBUSY;

	/* allocate transmit descriptors */
	err = ixgbe_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbe_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbe_configure(adapter);

	err = ixgbe_request_irq(adapter);
	if (err)
		goto err_req_irq;

	err = ixgbe_up_complete(adapter);
	if (err)
		goto err_up;

	return 0;

err_up:
	ixgbe_release_hw_control(adapter);
	ixgbe_free_irq(adapter);
err_req_irq:
	ixgbe_free_all_rx_resources(adapter);
err_setup_rx:
	ixgbe_free_all_tx_resources(adapter);
err_setup_tx:
	ixgbe_reset(adapter);

	return err;
}
/**
 * ixgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbe_close(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	ixgbe_down(adapter);
	ixgbe_free_irq(adapter);

	ixgbe_free_all_tx_resources(adapter);
	ixgbe_free_all_rx_resources(adapter);

	ixgbe_release_hw_control(adapter);

	return 0;
}
/**
 * ixgbe_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbe_update_stats(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 total_mpc = 0;
	u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;

	adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	for (i = 0; i < 8; i++) {
		/* for packet buffers not used, the register should read 0 */
		mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		missed_rx += mpc;
		adapter->stats.mpc[i] += mpc;
		total_mpc += adapter->stats.mpc[i];
		adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	}
	adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	/* work around hardware counting issue */
	adapter->stats.gprc -= missed_rx;

	/* 82598 hardware only has a 32 bit counter in the high register */
	adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
	adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
	adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	adapter->stats.bprc += bprc;
	adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	adapter->stats.mprc -= bprc;
	adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
	adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
	adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
	adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	adapter->stats.lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	adapter->stats.lxofftxc += lxoff;
	adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	/*
	 * 82598 errata - tx of flow control packets is included in tx counters
	 */
	xon_off_tot = lxon + lxoff;
	adapter->stats.gptc -= xon_off_tot;
	adapter->stats.mptc -= xon_off_tot;
	adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
	adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	adapter->stats.ptc64 -= xon_off_tot;
	adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);

	/* Fill out the OS statistics structure */
	adapter->net_stats.multicast = adapter->stats.mprc;

	/* Rx Errors */
	adapter->net_stats.rx_errors = adapter->stats.crcerrs +
				       adapter->stats.rlec;
	adapter->net_stats.rx_dropped = 0;
	adapter->net_stats.rx_length_errors = adapter->stats.rlec;
	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
	adapter->net_stats.rx_missed_errors = total_mpc;
}
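/*
 * The hardware statistics registers read above are clear-on-read,
 * which is why every read is accumulated with '+=' rather than
 * assigned.  The xon_off_tot arithmetic implements the 82598 errata
 * noted inline: PAUSE frames generated by the MAC are counted in the
 * Tx packet/byte counters, so lxon + lxoff frames (each ETH_ZLEN +
 * ETH_FCS_LEN bytes on the wire) are subtracted back out of gptc,
 * mptc, ptc64 and gotc.
 */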
/**
 * ixgbe_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbe_watchdog(unsigned long data)
{
	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
	struct net_device *netdev = adapter->netdev;
	bool link_up;
	u32 link_speed = 0;
	int i;

	adapter->hw.mac.ops.check_link(&adapter->hw, &(link_speed), &link_up);

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			u32 frctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
			u32 rmcs = IXGBE_READ_REG(&adapter->hw, IXGBE_RMCS);
#define FLOW_RX (frctl & IXGBE_FCTRL_RFCE)
#define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X)
			DPRINTK(LINK, INFO, "NIC Link is Up %s, "
				"Flow Control: %s\n",
				(link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
				 "10 Gbps" :
				 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
				  "1 Gbps" : "unknown speed")),
				((FLOW_RX && FLOW_TX) ? "RX/TX" :
				 (FLOW_RX ? "RX" :
				  (FLOW_TX ? "TX" : "None"))));

			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
			for (i = 0; i < adapter->num_tx_queues; i++)
				netif_wake_subqueue(netdev, i);
		} else {
			/* Force detection of hung controller */
			adapter->detect_tx_hung = true;
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			DPRINTK(LINK, INFO, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
	}

	ixgbe_update_stats(adapter);

	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		/* Cause software interrupt to ensure rx rings are cleaned */
		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
			u32 eics =
			 (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, eics);
		} else {
			/* for legacy and MSI interrupts don't set any bits that
			 * are enabled for EIAM, because this operation would
			 * set *both* EIMS and EICS for any bit in EIAM */
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
				    (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
		}
		/* Reset the timer */
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 2 * HZ));
	}
}
static int ixgbe_tso(struct ixgbe_adapter *adapter,
		     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags, u8 *hdr_len)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
	u32 mss_l4len_idx = 0, l4len;

	if (skb_is_gso(skb)) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}
		l4len = tcp_hdrlen(skb);
		*hdr_len += l4len;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			adapter->hw_tso_ctxt++;
		} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					     &ipv6_hdr(skb)->daddr,
					     0, IPPROTO_TCP, 0);
			adapter->hw_tso6_ctxt++;
		}

		i = tx_ring->next_to_use;

		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		/* VLAN MACLEN IPLEN */
		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= ((skb_network_offset(skb)) <<
				    IXGBE_ADVTXD_MACLEN_SHIFT);
		*hdr_len += skb_network_offset(skb);
		vlan_macip_lens |=
		    (skb_transport_header(skb) - skb_network_header(skb));
		*hdr_len +=
		    (skb_transport_header(skb) - skb_network_header(skb));
		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
				    IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->protocol == htons(ETH_P_IP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);

		/* MSS L4LEN IDX */
		mss_l4len_idx |=
		    (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
		mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
		context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}
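/*
 * TSO is a two-part handshake with the hardware.  The driver emits a
 * context descriptor carrying header lengths, MSS and L4 type, and
 * pre-seeds the TCP checksum field with only the pseudo-header sum
 * (csum_tcpudp_magic()/csum_ipv6_magic() called with length 0): the
 * device recomputes the per-segment length and payload checksum as
 * it slices the super-packet into MSS-sized frames.
 */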
static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
			  struct ixgbe_ring *tx_ring,
			  struct sk_buff *skb, u32 tx_flags)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= (skb_network_offset(skb) <<
				    IXGBE_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			vlan_macip_lens |= (skb_transport_header(skb) -
					    skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
				    IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			switch (skb->protocol) {
			case __constant_htons(ETH_P_IP):
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					type_tucmd_mlhl |=
						IXGBE_ADVTXD_TUCMD_L4T_TCP;
				break;

			case __constant_htons(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					type_tucmd_mlhl |=
						IXGBE_ADVTXD_TUCMD_L4T_TCP;
				break;

			default:
				if (unlikely(net_ratelimit())) {
					DPRINTK(PROBE, WARNING,
					 "partial checksum but proto=%x!\n",
					 skb->protocol);
				}
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
		context_desc->mss_l4len_idx = 0;

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;
		adapter->hw_csum_tx_good++;
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}
static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
			struct ixgbe_ring *tx_ring,
			struct sk_buff *skb, unsigned int first)
{
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int len = skb->len;
	unsigned int offset = 0, size, count = 0, i;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;

	len -= skb->data_len;

	i = tx_ring->next_to_use;

	while (len) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);

		tx_buffer_info->length = size;
		tx_buffer_info->dma = pci_map_single(adapter->pdev,
						     skb->data + offset,
						     size, PCI_DMA_TODEVICE);
		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		len -= size;
		offset += size;
		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;
		offset = frag->page_offset;

		while (len) {
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);

			tx_buffer_info->length = size;
			tx_buffer_info->dma = pci_map_page(adapter->pdev,
							   frag->page,
							   offset,
							   size, PCI_DMA_TODEVICE);
			tx_buffer_info->time_stamp = jiffies;
			tx_buffer_info->next_to_watch = i;

			len -= size;
			offset += size;
			count++;
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
	}
	if (i == 0)
		i = tx_ring->count - 1;
	else
		i = i - 1;
	tx_ring->tx_buffer_info[i].skb = skb;
	tx_ring->tx_buffer_info[first].next_to_watch = i;

	return count;
}
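/*
 * ixgbe_tx_map() walks the linear area and then every page fragment,
 * creating one buffer-info entry (and later one data descriptor) per
 * IXGBE_MAX_DATA_PER_TXD-sized chunk.  The skb pointer is stored only
 * in the last slot used, and first->next_to_watch is pointed at that
 * slot, so the Tx cleanup path can tell when the entire packet has
 * been sent before unmapping and freeing anything.
 */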
static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
			   struct ixgbe_ring *tx_ring,
			   int tx_flags, int count, u32 paylen, u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	unsigned int i;
	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;

	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;

	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
				 IXGBE_ADVTXD_POPTS_SHIFT;

		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
					 IXGBE_ADVTXD_POPTS_SHIFT;

	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
				 IXGBE_ADVTXD_POPTS_SHIFT;

	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);

		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
}
static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
				 struct ixgbe_ring *tx_ring, int size)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	netif_stop_subqueue(netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_wake_subqueue(netdev, tx_ring->queue_index);
	++adapter->restart_queue;
	return 0;
}

static int ixgbe_maybe_stop_tx(struct net_device *netdev,
			       struct ixgbe_ring *tx_ring, int size)
{
	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
}
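/*
 * The stop/re-check/wake sequence above closes a classic producer/
 * consumer race: between this path observing a full ring and stopping
 * the queue, the cleanup path on another CPU may free descriptors and
 * see the queue still running (so it will not wake it).  Stopping
 * first, issuing a full memory barrier, and then re-checking the free
 * count guarantees that either this CPU sees the new space or the
 * cleaner sees the stopped queue.
 */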
static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring;
	unsigned int len = skb->len;
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int r_idx = 0, tso;
	unsigned int mss = 0;
	int count = 0;
	unsigned int f;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	len -= skb->data_len;
	r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping;
	tx_ring = &adapter->tx_ring[r_idx];

	if (skb->len <= 0) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	mss = skb_shinfo(skb)->gso_size;

	if (mss)
		count++;
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		count++;

	count += TXD_USE_COUNT(len);
	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
		adapter->tx_busy++;
		return NETDEV_TX_BUSY;
	}
	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT);
	}

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IXGBE_TX_FLAGS_IPV4;
	first = tx_ring->next_to_use;
	tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IXGBE_TX_FLAGS_TSO;
	else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
		 (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IXGBE_TX_FLAGS_CSUM;

	ixgbe_tx_queue(adapter, tx_ring, tx_flags,
		       ixgbe_tx_map(adapter, tx_ring, skb, first),
		       skb->len, hdr_len);

	netdev->trans_start = jiffies;

	ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;
}
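/*
 * The descriptor budget computed at the top of ixgbe_xmit_frame() --
 * one optional context descriptor plus TXD_USE_COUNT() for the linear
 * area and for each fragment -- is checked before any descriptor is
 * written, so NETDEV_TX_BUSY can be returned without having to unwind
 * partially queued work.
 */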
/**
 * ixgbe_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* only return the current stats */
	return &adapter->net_stats;
}
/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);

	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ixgbe_netpoll(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
	ixgbe_intr(adapter->pdev->irq, netdev);
	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
	enable_irq(adapter->pdev->irq);
}
#endif
/**
 * ixgbe_napi_add_all - prep napi structs for use
 * @adapter: private struct
 * helper function to napi_add each possible q_vector->napi
 */
static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	int (*poll)(struct napi_struct *, int);

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		poll = &ixgbe_clean_rxonly;
	} else {
		poll = &ixgbe_poll;
		/* only one q_vector for legacy modes */
		q_vectors = 1;
	}

	for (i = 0; i < q_vectors; i++) {
		struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       (*poll), 64);
	}
}
/**
 * ixgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit ixgbe_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbe_adapter *adapter = NULL;
	struct ixgbe_hw *hw;
	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	static int cards_found;
	int i, err, pci_using_dac;
	u16 link_status, link_speed, link_width;
	u32 part_num;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, ixgbe_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	hw->hw_addr = ioremap(mmio_start, mmio_len);
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	for (i = 1; i <= 5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
	}

	netdev->open = &ixgbe_open;
	netdev->stop = &ixgbe_close;
	netdev->hard_start_xmit = &ixgbe_xmit_frame;
	netdev->get_stats = &ixgbe_get_stats;
	netdev->set_multicast_list = &ixgbe_set_multi;
	netdev->set_mac_address = &ixgbe_set_mac;
	netdev->change_mtu = &ixgbe_change_mtu;
	ixgbe_set_ethtool_ops(netdev);
	netdev->tx_timeout = &ixgbe_tx_timeout;
	netdev->watchdog_timeo = 5 * HZ;
	netdev->vlan_rx_register = ixgbe_vlan_rx_register;
	netdev->vlan_rx_add_vid = ixgbe_vlan_rx_add_vid;
	netdev->vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid;
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = ixgbe_netpoll;
#endif
	strcpy(netdev->name, pci_name(pdev));

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	adapter->bd_number = cards_found;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	err = ii->get_invariants(hw);
	if (err)
		goto err_hw_init;

	/* setup the private structure */
	err = ixgbe_sw_init(adapter);
	if (err)
		goto err_sw_init;

	netdev->features = NETIF_F_SG |
			   NETIF_F_HW_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_LRO;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_HW_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->features |= NETIF_F_MULTI_QUEUE;

	/* make sure the EEPROM is good */
	if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
		dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);

	if (ixgbe_validate_mac_addr(netdev->dev_addr)) {
		err = -EIO;
		goto err_eeprom;
	}

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &ixgbe_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, ixgbe_reset_task);

	/* initialize default flow control settings */
	hw->fc.original_type = ixgbe_fc_full;
	hw->fc.type = ixgbe_fc_full;
	hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
	hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	/* print bus type/speed/width info */
	pci_read_config_word(pdev, IXGBE_PCI_LINK_STATUS, &link_status);
	link_speed = link_status & IXGBE_PCI_LINK_SPEED;
	link_width = link_status & IXGBE_PCI_LINK_WIDTH;
	dev_info(&pdev->dev, "(PCI Express:%s:%s) "
		 "%02x:%02x:%02x:%02x:%02x:%02x\n",
		((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" :
		 (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" :
		 "Unknown"),
		((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" :
		 (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" :
		 (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" :
		 (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" :
		 "Unknown"),
		netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
		netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
	ixgbe_read_part_num(hw, &part_num);
	dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
		 hw->mac.type, hw->phy.type,
		 (part_num >> 8), (part_num & 0xff));

	if (link_width <= IXGBE_PCI_LINK_WIDTH_4) {
		dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
			 "this card is not sufficient for optimal "
			 "performance.\n");
		dev_warn(&pdev->dev, "For optimal performance a x8 "
			 "PCI-Express slot is required.\n");
	}

	/* reset the hardware with the new settings */
	ixgbe_start_hw(hw);

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
	for (i = 0; i < adapter->num_tx_queues; i++)
		netif_stop_subqueue(netdev, i);

	ixgbe_napi_add_all(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

#ifdef CONFIG_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
		/* always use CB2 mode, difference is masked
		 * in the CB driver */
		IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
		ixgbe_setup_dca(adapter);
	}
#endif

	dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
	cards_found++;
	return 0;

err_register:
	ixgbe_release_hw_control(adapter);
err_hw_init:
err_sw_init:
	ixgbe_reset_interrupt_capability(adapter);
err_eeprom:
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbe_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBE_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);

	flush_scheduled_work();

#ifdef CONFIG_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
		dca_remove_requester(&pdev->dev);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
	}
#endif

	unregister_netdev(netdev);

	ixgbe_reset_interrupt_capability(adapter);

	ixgbe_release_hw_control(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	DPRINTK(PROBE, INFO, "complete\n");
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev->priv;

	netif_device_detach(netdev);

	if (netif_running(netdev))
		ixgbe_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev->priv;

	if (pci_enable_device(pdev)) {
		DPRINTK(PROBE, ERR,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_restore_state(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	ixgbe_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void ixgbe_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev->priv;

	if (netif_running(netdev)) {
		if (ixgbe_up(adapter)) {
			DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}

static struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};
static struct pci_driver ixgbe_driver = {
	.name     = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe    = ixgbe_probe,
	.remove   = __devexit_p(ixgbe_remove),
#ifdef CONFIG_PM
	.suspend  = ixgbe_suspend,
	.resume   = ixgbe_resume,
#endif
	.shutdown = ixgbe_shutdown,
	.err_handler = &ixgbe_err_handler
};
/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbe_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name,
	       ixgbe_driver_string, ixgbe_driver_version);

	printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);

#ifdef CONFIG_DCA
	dca_register_notify(&dca_notifier);
#endif

	ret = pci_register_driver(&ixgbe_driver);
	return ret;
}

module_init(ixgbe_init_module);
/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbe_exit_module(void)
{
#ifdef CONFIG_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&ixgbe_driver);
}

#ifdef CONFIG_DCA
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
			    void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
					 __ixgbe_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_DCA */

module_exit(ixgbe_exit_module);