/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2008 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "ixgbe.h"
#include "ixgbe_common.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Network Driver";

#define DRV_VERSION "1.3.30-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
static char ixgbe_copyright[] = "Copyright (c) 1999-2007 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598] = &ixgbe_82598_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbe_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
	 board_82598 },

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
                            void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next          = NULL,
	.priority      = 0
};
#endif

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
	                ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
	                ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
                           u8 msix_vector)
{
	u32 ivar, index;

	msix_vector |= IXGBE_IVAR_ALLOC_VAL;
	index = (int_alloc_entry >> 2) & 0x1F;
	ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR(index));
	ivar &= ~(0xFF << (8 * (int_alloc_entry & 0x3)));
	ivar |= (msix_vector << (8 * (int_alloc_entry & 0x3)));
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
}
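
/*
 * Worked example of the IVAR packing above: each 32-bit IVAR register
 * holds four 8-bit allocation entries.  For int_alloc_entry = 5,
 * index = (5 >> 2) & 0x1F = 1 and 8 * (5 & 0x3) = 8, so the vector is
 * written into byte 1 of IVAR(1), with IXGBE_IVAR_ALLOC_VAL marking the
 * entry valid.
 */
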
static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
                                             struct ixgbe_tx_buffer
                                             *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		pci_unmap_page(adapter->pdev, tx_buffer_info->dma,
		               tx_buffer_info->length, PCI_DMA_TODEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	/* tx_buffer_info must be completely set up in the transmit path */
}

static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
                                       struct ixgbe_ring *tx_ring,
                                       unsigned int eop)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 head, tail;

	/* Detect a transmit hang in hardware, this serializes the
	 * check with the clearing of time_stamp and movement of eop */
	head = IXGBE_READ_REG(hw, tx_ring->head);
	tail = IXGBE_READ_REG(hw, tx_ring->tail);
	adapter->detect_tx_hung = false;
	if ((head != tail) &&
	    tx_ring->tx_buffer_info[eop].time_stamp &&
	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
	    !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
		/* detected Tx unit hang */
		union ixgbe_adv_tx_desc *tx_desc;
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
		DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
			"  Tx Queue             <%d>\n"
			"  TDH, TDT             <%x>, <%x>\n"
			"  next_to_use          <%x>\n"
			"  next_to_clean        <%x>\n"
			"tx_buffer_info[next_to_clean]\n"
			"  time_stamp           <%lx>\n"
			"  jiffies              <%lx>\n",
			tx_ring->queue_index,
			head, tail,
			tx_ring->next_to_use, eop,
			tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
		return true;
	}

	return false;
}

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1)      /* for context */

#define GET_TX_HEAD_FROM_RING(ring) (\
	*(volatile u32 *) \
	((union ixgbe_adv_tx_desc *)(ring)->desc + (ring)->count))
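
/*
 * Example of the descriptor math above: with IXGBE_MAX_TXD_PWR = 14 one
 * descriptor covers at most 16384 bytes, so TXD_USE_COUNT(16384) is 1
 * while TXD_USE_COUNT(16385) is 2.  GET_TX_HEAD_FROM_RING reads the head
 * value the hardware writes back to the slot immediately past the last
 * descriptor in the ring (see the TDWBA setup in ixgbe_configure_tx).
 */
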
static void ixgbe_tx_timeout(struct net_device *netdev);

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
                               struct ixgbe_ring *tx_ring)
{
	union ixgbe_adv_tx_desc *tx_desc;
	struct ixgbe_tx_buffer *tx_buffer_info;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;
	unsigned int i;
	u32 head, oldhead;
	unsigned int count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	rmb();
	head = GET_TX_HEAD_FROM_RING(tx_ring);
	head = le32_to_cpu(head);
	i = tx_ring->next_to_clean;
	while (1) {
		while (i != head) {
			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			skb = tx_buffer_info->skb;

			if (skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
				            skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbe_unmap_and_free_tx_resource(adapter,
			                                 tx_buffer_info);

			i++;
			if (i == tx_ring->count)
				i = 0;

			count++;
			if (count == tx_ring->count)
				goto done_cleaning;
		}
		oldhead = head;
		rmb();
		head = GET_TX_HEAD_FROM_RING(tx_ring);
		head = le32_to_cpu(head);
		if (head == oldhead)
			goto done_cleaning;
	} /* while (1) */

done_cleaning:
	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(netdev) &&
	             (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
			/* schedule immediate reset if we believe we hung */
			DPRINTK(PROBE, INFO,
			        "tx hang %d detected, resetting adapter\n",
			        adapter->tx_timeout_count + 1);
			ixgbe_tx_timeout(adapter->netdev);
		}
	}

	/* re-arm the interrupt */
	if ((total_packets >= tx_ring->work_limit) ||
	    (count == tx_ring->count))
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->v_idx);

	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	adapter->net_stats.tx_bytes += total_bytes;
	adapter->net_stats.tx_packets += total_packets;
	return (total_packets ? true : false);
}

#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *rx_ring)
{
	u32 rxctrl;
	int cpu = get_cpu();
	int q = rx_ring - adapter->rx_ring;

	if (rx_ring->cpu != cpu) {
		rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
		rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
		rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
		rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
		rx_ring->cpu = cpu;
	}
	put_cpu();
}

static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *tx_ring)
{
	u32 txctrl;
	int cpu = get_cpu();
	int q = tx_ring - adapter->tx_ring;

	if (tx_ring->cpu != cpu) {
		txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
		txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
		txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
		tx_ring->cpu = cpu;
	}
	put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
	int i;

	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
		return;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].cpu = -1;
		ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].cpu = -1;
		ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
	}
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if we're already enabled, don't do it again */
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			break;
		/* Always use CB2 mode, difference is masked
		 * in the CB driver. */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
			ixgbe_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
		}
		break;
	}

	return 0;
}

#endif /* CONFIG_IXGBE_DCA */

/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @adapter: board private structure
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
                              struct sk_buff *skb, u8 status,
                              struct ixgbe_ring *ring,
                              union ixgbe_adv_rx_desc *rx_desc)
{
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	if (adapter->netdev->features & NETIF_F_LRO &&
	    skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (adapter->vlgrp && is_vlan)
			lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb,
			                             adapter->vlgrp, tag,
			                             rx_desc);
		else
			lro_receive_skb(&ring->lro_mgr, skb, rx_desc);
		ring->lro_used = true;
	} else {
		if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
			if (adapter->vlgrp && is_vlan)
				vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag);
			else
				netif_receive_skb(skb);
		} else {
			if (adapter->vlgrp && is_vlan)
				vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
			else
				netif_rx(skb);
		}
	}
}

/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
                                     u32 status_err, struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_NONE;

	/* Rx csum disabled */
	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_rx_good++;
}

/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                                   struct ixgbe_ring *rx_ring,
                                   int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	unsigned int i;
	unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

		if (!bi->page_dma &&
		    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
			if (!bi->page) {
				bi->page = alloc_page(GFP_ATOMIC);
				if (!bi->page) {
					adapter->alloc_rx_page_failed++;
					goto no_buffers;
				}
				bi->page_offset = 0;
			} else {
				/* use a half page if we're re-using */
				bi->page_offset ^= (PAGE_SIZE / 2);
			}

			bi->page_dma = pci_map_page(pdev, bi->page,
			                            bi->page_offset,
			                            (PAGE_SIZE / 2),
			                            PCI_DMA_FROMDEVICE);
		}

		if (!bi->skb) {
			struct sk_buff *skb = netdev_alloc_skb(adapter->netdev,
			                                       bufsz);

			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			/*
			 * Make buffer alignment 2 beyond a 16 byte boundary
			 * this will result in a 16 byte aligned IP header after
			 * the 14 byte MAC header is removed
			 */
			skb_reserve(skb, NET_IP_ALIGN);

			bi->skb = skb;
			bi->dma = pci_map_single(pdev, skb->data, bufsz,
			                         PCI_DMA_FROMDEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i-- == 0)
			i = (rx_ring->count - 1);

		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}

static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
}

static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}

static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
                               struct ixgbe_ring *rx_ring,
                               int *work_done, int work_to_do)
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 len, staterr;
	u16 hdr_info;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		u32 upper_len = 0;
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
			if (hdr_info & IXGBE_RXDADV_SPH)
				adapter->rx_hdr_split++;
			if (len > IXGBE_RX_HDR_SIZE)
				len = IXGBE_RX_HDR_SIZE;
			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
		} else {
			len = le16_to_cpu(rx_desc->wb.upper.length);
		}

		cleaned = true;
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (len && !skb_shinfo(skb)->nr_frags) {
			pci_unmap_single(pdev, rx_buffer_info->dma,
			                 rx_ring->rx_buf_len + NET_IP_ALIGN,
			                 PCI_DMA_FROMDEVICE);
			skb_put(skb, len);
		}

		if (upper_len) {
			pci_unmap_page(pdev, rx_buffer_info->page_dma,
			               PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
			rx_buffer_info->page_dma = 0;
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			                   rx_buffer_info->page,
			                   rx_buffer_info->page_offset,
			                   upper_len);

			if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
			    (page_count(rx_buffer_info->page) != 1))
				rx_buffer_info->page = NULL;
			else
				get_page(rx_buffer_info->page);

			skb->len += upper_len;
			skb->data_len += upper_len;
			skb->truesize += upper_len;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_buffer = &rx_ring->rx_buffer_info[i];

		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);

		cleaned_count++;
		if (staterr & IXGBE_RXD_STAT_EOP) {
			rx_ring->stats.packets++;
			rx_ring->stats.bytes += skb->len;
		} else {
			rx_buffer_info->skb = next_buffer->skb;
			rx_buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			adapter->non_eop_descs++;
			goto next_desc;
		}

		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbe_rx_checksum(adapter, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, adapter->netdev);
		ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc);
		adapter->netdev->last_rx = jiffies;

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	if (rx_ring->lro_used) {
		lro_flush_all(&rx_ring->lro_mgr);
		rx_ring->lro_used = false;
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;

	return cleaned;
}

static int ixgbe_clean_rxonly(struct napi_struct *, int);

/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
	struct ixgbe_q_vector *q_vector;
	int i, j, q_vectors, v_idx, r_idx;
	u32 mask;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = &adapter->q_vector[v_idx];
		/* XXX for_each_bit(...) */
		r_idx = find_first_bit(q_vector->rxr_idx,
		                       adapter->num_rx_queues);

		for (i = 0; i < q_vector->rxr_count; i++) {
			j = adapter->rx_ring[r_idx].reg_idx;
			ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx);
			r_idx = find_next_bit(q_vector->rxr_idx,
			                      adapter->num_rx_queues,
			                      r_idx + 1);
		}
		r_idx = find_first_bit(q_vector->txr_idx,
		                       adapter->num_tx_queues);

		for (i = 0; i < q_vector->txr_count; i++) {
			j = adapter->tx_ring[r_idx].reg_idx;
			ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx);
			r_idx = find_next_bit(q_vector->txr_idx,
			                      adapter->num_tx_queues,
			                      r_idx + 1);
		}

		/* if this is a tx only vector halve the interrupt rate */
		if (q_vector->txr_count && !q_vector->rxr_count)
			q_vector->eitr = (adapter->eitr_param >> 1);
		else
			/* rx only or mixed */
			q_vector->eitr = adapter->eitr_param;

		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx),
		                EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
	}

	ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);

	/* set up to autoclear timer, and the vectors */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}
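
/*
 * Note on the EIAC programming above: the queue vectors are left in the
 * auto-clear mask so they acknowledge themselves when they fire, while
 * OTHER/LSC are removed from it, so the link handler must explicitly
 * re-enable its cause (see ixgbe_msix_lsc).
 */
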
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @eitr: eitr setting (ints per sec) to give last timeslice
 * @itr_setting: current throttle rate in ints/second
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 *      this functionality is controlled by the InterruptThrottleRate module
 *      parameter (see ixgbe_param.c)
 **/
static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
                           u32 eitr, u8 itr_setting,
                           int packets, int bytes)
{
	unsigned int retval = itr_setting;
	u32 timepassed_us;
	u64 bytes_perint;

	if (packets == 0)
		goto update_itr_done;

	/* simple throttlerate management
	 *    0-20MB/s   lowest (100000 ints/s)
	 *   20-100MB/s  low    (20000 ints/s)
	 *  100-1249MB/s bulk   (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = 1000000/eitr;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > adapter->eitr_low)
			retval = low_latency;
		break;
	case low_latency:
		if (bytes_perint > adapter->eitr_high)
			retval = bulk_latency;
		else if (bytes_perint <= adapter->eitr_low)
			retval = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= adapter->eitr_high)
			retval = low_latency;
		break;
	}

update_itr_done:
	return retval;
}
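
/*
 * Arithmetic example for the classification above: at eitr = 8000 ints/s
 * the timeslice is 1000000/8000 = 125 us, so 250000 bytes received in
 * one interrupt gives bytes_perint = 2000 bytes/us, which is then
 * compared against adapter->eitr_low/eitr_high to pick the band.
 */
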
static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 new_itr;
	u8 current_itr, ret_itr;
	int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) /
	                      sizeof(struct ixgbe_q_vector);
	struct ixgbe_ring *rx_ring, *tx_ring;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
		                           q_vector->tx_itr,
		                           tx_ring->total_packets,
		                           tx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
		                    q_vector->tx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
		                      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
		                           q_vector->rx_itr,
		                           rx_ring->total_packets,
		                           rx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
		                    q_vector->rx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
		                      r_idx + 1);
	}

	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
	default:
		new_itr = 8000;
		break;
	}

	if (new_itr != q_vector->eitr) {
		u32 itr_reg;
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
		q_vector->eitr = new_itr;
		itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
		/* must write high and low 16 bits to reset counter */
		DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx,
		        itr_reg);
		IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg | (itr_reg)<<16);
	}

	return;
}
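
/*
 * Smoothing example for the 90/10 filter above: with a current eitr of
 * 8000 ints/s and a new target of 20000 ints/s the next value is
 * (8000 * 90)/100 + (20000 * 10)/100 = 9200, so the rate converges over
 * several intervals instead of jumping.
 */
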
static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		schedule_work(&adapter->watchdog_task);
	}
}

static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	if (eicr & IXGBE_EICR_LSC)
		ixgbe_check_lsc(adapter);

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);

	return IRQ_HANDLED;
}

static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *tx_ring;
	int i, r_idx;

	if (!q_vector->txr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			ixgbe_update_tx_dca(adapter, tx_ring);
#endif
		tx_ring->total_bytes = 0;
		tx_ring->total_packets = 0;
		ixgbe_clean_tx_irq(adapter, tx_ring);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
		                      r_idx + 1);
	}

	return IRQ_HANDLED;
}

/**
 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *rx_ring;
	int r_idx;
	int i;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		rx_ring->total_bytes = 0;
		rx_ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
		                      r_idx + 1);
	}

	if (!q_vector->rxr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);
	/* disable interrupts on this vector only */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
	netif_rx_schedule(adapter->netdev, &q_vector->napi);

	return IRQ_HANDLED;
}

static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
{
	ixgbe_msix_clean_rx(irq, data);
	ixgbe_msix_clean_tx(irq, data);

	return IRQ_HANDLED;
}

/**
 * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function is optimized for cleaning one queue only on a single
 * q_vector.
 **/
static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
	                       container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *rx_ring = NULL;
	int work_done = 0;
	long r_idx;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		ixgbe_update_rx_dca(adapter, rx_ring);
#endif

	ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);

	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		netif_rx_complete(adapter->netdev, napi);
		if (adapter->itr_setting & 3)
			ixgbe_set_itr_msix(q_vector);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rx_ring->v_idx);
	}

	return work_done;
}

/**
 * ixgbe_clean_rxonly_many - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean more than one rx queue associated with a
 * q_vector.
 **/
static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
	                       container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *rx_ring = NULL;
	int work_done = 0, i;
	long r_idx;
	u16 enable_mask = 0;

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	budget /= (q_vector->rxr_count ?: 1);
	budget = max(budget, 1);
	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			ixgbe_update_rx_dca(adapter, rx_ring);
#endif
		ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);
		enable_mask |= rx_ring->v_idx;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
		                      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);
	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		netif_rx_complete(adapter->netdev, napi);
		if (adapter->itr_setting & 3)
			ixgbe_set_itr_msix(q_vector);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, enable_mask);
		return 0;
	}

	return work_done;
}

static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
                                     int r_idx)
{
	a->q_vector[v_idx].adapter = a;
	set_bit(r_idx, a->q_vector[v_idx].rxr_idx);
	a->q_vector[v_idx].rxr_count++;
	a->rx_ring[r_idx].v_idx = 1 << v_idx;
}

static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
                                     int r_idx)
{
	a->q_vector[v_idx].adapter = a;
	set_bit(r_idx, a->q_vector[v_idx].txr_idx);
	a->q_vector[v_idx].txr_count++;
	a->tx_ring[r_idx].v_idx = 1 << v_idx;
}

/**
 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 * @vectors: allotted vector count for descriptor rings
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
                                      int vectors)
{
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	/* No mapping required if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		goto out;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);

		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}
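
/*
 * Grouping example for the fallback path above: with 8 Rx queues on 2
 * vectors, DIV_ROUND_UP(8, 2) maps rings 0-3 to vector 0, and then
 * DIV_ROUND_UP(4, 1) maps the remaining rings 4-7 to vector 1.
 */
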
/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irqreturn_t (*handler)(int, void *);
	int i, vector, q_vectors, err;

	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* Map the Tx/Rx rings to the vectors we were allotted. */
	err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
	if (err)
		goto out;

#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
                         (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
                         &ixgbe_msix_clean_many)
	for (vector = 0; vector < q_vectors; vector++) {
		handler = SET_HANDLER(&adapter->q_vector[vector]);
		sprintf(adapter->name[vector], "%s:v%d-%s",
		        netdev->name, vector,
		        (handler == &ixgbe_msix_clean_rx) ? "Rx" :
		        ((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx"));
		err = request_irq(adapter->msix_entries[vector].vector,
		                  handler, 0, adapter->name[vector],
		                  &(adapter->q_vector[vector]));
		if (err) {
			DPRINTK(PROBE, ERR,
			        "request_irq failed for MSIX interrupt "
			        "Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	sprintf(adapter->name[vector], "%s:lsc", netdev->name);
	err = request_irq(adapter->msix_entries[vector].vector,
	                  &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
	if (err) {
		DPRINTK(PROBE, ERR,
		        "request_irq for msix_lsc failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	for (i = vector - 1; i >= 0; i--)
		free_irq(adapter->msix_entries[--vector].vector,
		         &(adapter->q_vector[i]));
	adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
out:
	return err;
}

static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_q_vector *q_vector = adapter->q_vector;
	u8 current_itr;
	u32 new_itr = q_vector->eitr;
	struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
	struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];

	q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
	                                    q_vector->tx_itr,
	                                    tx_ring->total_packets,
	                                    tx_ring->total_bytes);
	q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
	                                    q_vector->rx_itr,
	                                    rx_ring->total_packets,
	                                    rx_ring->total_bytes);

	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 8000;
		break;
	default:
		break;
	}

	if (new_itr != q_vector->eitr) {
		u32 itr_reg;
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
		q_vector->eitr = new_itr;
		itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
		/* must write high and low 16 bits to reset counter */
		IXGBE_WRITE_REG(hw, IXGBE_EITR(0), itr_reg | (itr_reg)<<16);
	}

	return;
}

/**
 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int i;
		for (i = 0; i < adapter->num_msix_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

/**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
{
	u32 mask;
	mask = IXGBE_EIMS_ENABLE_MASK;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
	IXGBE_WRITE_FLUSH(&adapter->hw);
}

/**
 * ixgbe_intr - legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t ixgbe_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr;

	/* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
	 * therefore no explicit interrupt disable is necessary */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
	if (!eicr) {
		/* shared interrupt alert!
		 * make sure interrupts are enabled because the read will
		 * have disabled interrupts due to EIAM */
		ixgbe_irq_enable(adapter);
		return IRQ_NONE;	/* Not our interrupt */
	}

	if (eicr & IXGBE_EICR_LSC)
		ixgbe_check_lsc(adapter);

	if (netif_rx_schedule_prep(netdev, &adapter->q_vector[0].napi)) {
		adapter->tx_ring[0].total_packets = 0;
		adapter->tx_ring[0].total_bytes = 0;
		adapter->rx_ring[0].total_packets = 0;
		adapter->rx_ring[0].total_bytes = 0;
		/* would disable interrupts here but EIAM disabled it */
		__netif_rx_schedule(netdev, &adapter->q_vector[0].napi);
	}

	return IRQ_HANDLED;
}

static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
		bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
		bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
		q_vector->rxr_count = 0;
		q_vector->txr_count = 0;
	}
}

/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		err = ixgbe_request_msix_irqs(adapter);
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
		                  netdev->name, netdev);
	} else {
		err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
		                  netdev->name, netdev);
	}

	if (err)
		DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);

	return err;
}

static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int i, q_vectors;

		q_vectors = adapter->num_msix_vectors;

		i = q_vectors - 1;
		free_irq(adapter->msix_entries[i].vector, netdev);

		i--;
		for (; i >= 0; i--) {
			free_irq(adapter->msix_entries[i].vector,
			         &(adapter->q_vector[i]));
		}

		ixgbe_reset_q_vectors(adapter);
	} else {
		free_irq(adapter->pdev->irq, netdev);
	}
}

/**
 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
 *
 **/
static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
	                EITR_INTS_PER_SEC_TO_REG(adapter->eitr_param));

	ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0);
	ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(0), 0);

	map_vector_to_rxq(adapter, 0, 0);
	map_vector_to_txq(adapter, 0, 0);

	DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n");
}

/**
 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
{
	u64 tdba, tdwba;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, j, tdlen, txctrl;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *ring = &adapter->tx_ring[i];
		j = ring->reg_idx;
		tdba = ring->dma;
		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		                (tdba & DMA_32BIT_MASK));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		tdwba = ring->dma +
		        (ring->count * sizeof(union ixgbe_adv_tx_desc));
		tdwba |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(j), tdwba & DMA_32BIT_MASK);
		IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(j), (tdwba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
		adapter->tx_ring[i].head = IXGBE_TDH(j);
		adapter->tx_ring[i].tail = IXGBE_TDT(j);
		/* Disable Tx Head Writeback RO bit, since this hoses
		 * bookkeeping if things aren't delivered in order.
		 */
		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
	}
}
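
/*
 * The TDWBA programming above points head write-back at the first bytes
 * past the ring's final descriptor, which is exactly the location that
 * GET_TX_HEAD_FROM_RING dereferences during transmit cleanup.
 */
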
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
{
	struct ixgbe_ring *rx_ring;
	u32 srrctl;
	int queue0;
	unsigned long mask;

	/* program one srrctl register per VMDq index */
	if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
		long shift, len;
		mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask;
		len = sizeof(adapter->ring_feature[RING_F_VMDQ].mask) * 8;
		shift = find_first_bit(&mask, len);
		queue0 = index & mask;
		index = (index & mask) >> shift;
	/* program one srrctl per RSS queue since RDRXCTL.MVMEN is enabled */
	} else {
		mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask;
		queue0 = index & mask;
		index = index & mask;
	}

	rx_ring = &adapter->rx_ring[queue0];

	srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));

	srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
	srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;

	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		srrctl |= IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
		srrctl |= ((IXGBE_RX_HDR_SIZE <<
		            IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
		           IXGBE_SRRCTL_BSIZEHDR_MASK);
	} else {
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
			srrctl |= IXGBE_RXBUFFER_2048 >>
			          IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		else
			srrctl |= rx_ring->rx_buf_len >>
			          IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	}
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
}

/**
 * ixgbe_get_skb_hdr - helper function for LRO header processing
 * @skb: pointer to sk_buff to be added to LRO packet
 * @iphdr: pointer to ip header structure
 * @tcph: pointer to tcp header structure
 * @hdr_flags: pointer to header flags
 * @priv: private data
 **/
static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
                             u64 *hdr_flags, void *priv)
{
	union ixgbe_adv_rx_desc *rx_desc = priv;

	/* Verify that this is a valid IPv4 TCP packet */
	if (!((ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_IPV4) &&
	      (ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_TCP)))
		return -1;

	/* Set network headers */
	skb_reset_network_header(skb);
	skb_set_transport_header(skb, ip_hdrlen(skb));
	*iphdr = ip_hdr(skb);
	*tcph = tcp_hdr(skb);
	*hdr_flags = LRO_IPV4 | LRO_TCP;
	return 0;
}

#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
			(((S) & (PAGE_SIZE - 1)) ? 1 : 0))

/**
 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
{
	u64 rdba;
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i, j;
	u32 rdlen, rxctrl, rxcsum;
	static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
	                  0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
	                  0x6A3E67EA, 0x14364D17, 0x3BED200D};
	u32 fctrl, hlreg0;
	u32 reta = 0, mrqc;
	u32 rdrxctl;
	int rx_buf_len;
	unsigned int pages;

	/* Decide whether to use packet split mode or not */
	adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;

	/* Set the RX buffer length according to the mode */
	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		rx_buf_len = IXGBE_RX_HDR_SIZE;
	} else {
		if (netdev->mtu <= ETH_DATA_LEN)
			rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		else
			rx_buf_len = ALIGN(max_frame, 1024);
	}

	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
	else
		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);

	pages = PAGE_USE_COUNT(adapter->netdev->mtu);

	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
	/* disable receives while setting up the descriptors */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rdba = adapter->rx_ring[i].dma;
		j = adapter->rx_ring[i].reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_32BIT_MASK));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
		adapter->rx_ring[i].head = IXGBE_RDH(j);
		adapter->rx_ring[i].tail = IXGBE_RDT(j);
		adapter->rx_ring[i].rx_buf_len = rx_buf_len;
		/* Initial LRO Settings */
		adapter->rx_ring[i].lro_mgr.max_aggr = IXGBE_MAX_LRO_AGGREGATE;
		adapter->rx_ring[i].lro_mgr.max_desc = IXGBE_MAX_LRO_DESCRIPTORS;
		adapter->rx_ring[i].lro_mgr.get_skb_header = ixgbe_get_skb_hdr;
		adapter->rx_ring[i].lro_mgr.features = LRO_F_EXTRACT_VLAN_ID;
		if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
			adapter->rx_ring[i].lro_mgr.features |= LRO_F_NAPI;
		adapter->rx_ring[i].lro_mgr.dev = adapter->netdev;
		adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
		adapter->rx_ring[i].lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;

		ixgbe_configure_srrctl(adapter, j);
	}

	/*
	 * For VMDq support of different descriptor types or
	 * buffer sizes through the use of multiple SRRCTL
	 * registers, RDRXCTL.MVMEN must be set to 1
	 *
	 * also, the manual doesn't mention it clearly but DCA hints
	 * will only use queue 0's tags unless this bit is set.  Side
	 * effects of setting this bit are only that SRRCTL must be
	 * fully programmed [0..15]
	 */
	rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
	rdrxctl |= IXGBE_RDRXCTL_MVMEN;
	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
		/* Fill out redirection table */
		for (i = 0, j = 0; i < 128; i++, j++) {
			if (j == adapter->ring_feature[RING_F_RSS].indices)
				j = 0;
			/* reta = 4-byte sliding window of
			 * 0x00..(indices-1)(indices-1)00..etc. */
			reta = (reta << 8) | (j * 0x11);
			if ((i & 3) == 3)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
		}

		/* Fill out hash function seeds */
		for (i = 0; i < 10; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);

		mrqc = IXGBE_MRQC_RSSEN
		    /* Perform hash on these packet types */
		       | IXGBE_MRQC_RSS_FIELD_IPV4
		       | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
		       | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
		       | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
		       | IXGBE_MRQC_RSS_FIELD_IPV6_EX
		       | IXGBE_MRQC_RSS_FIELD_IPV6
		       | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
		       | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
		       | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
	    adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
		/* Disable indicating checksum in descriptor, enables
		 * RSS hash */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}
	if (!(rxcsum & IXGBE_RXCSUM_PCSD)) {
		/* Enable IPv4 payload checksum for UDP fragments
		 * if PCSD is not set */
		rxcsum |= IXGBE_RXCSUM_IPPCSE;
	}

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
}
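
/*
 * RETA example for the redirection loop above: with 4 RSS indices,
 * j * 0x11 yields the byte sequence 0x00, 0x11, 0x22, 0x33, and every
 * fourth iteration one packed 32-bit redirection entry is written out.
 */
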
static void ixgbe_vlan_rx_register(struct net_device *netdev,
                                   struct vlan_group *grp)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 ctrl;

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
		ctrl |= IXGBE_VLNCTRL_VME;
		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
	}

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_enable(adapter);
}

static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* add VID to filter table */
	hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true);
}

static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_disable(adapter);

	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_enable(adapter);

	/* remove VID from filter table */
	hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false);
}

static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
{
	ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}

static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
{
	struct dev_mc_list *mc_ptr;
	u8 *addr = *mc_addr_ptr;
	*vmdq = 0;

	mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
	if (mc_ptr->next)
		*mc_addr_ptr = mc_ptr->next->dmi_addr;
	else
		*mc_addr_ptr = NULL;

	return addr;
}

/**
 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the unicast/multicast
 * address list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast and
 * promiscuous mode.
 **/
static void ixgbe_set_rx_mode(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 fctrl, vlnctrl;
	u8 *addr_list = NULL;
	int addr_count = 0;

	/* Check for Promiscuous and All Multicast modes */

	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);

	if (netdev->flags & IFF_PROMISC) {
		hw->addr_ctrl.user_set_promisc = 1;
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		vlnctrl &= ~IXGBE_VLNCTRL_VFE;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			fctrl |= IXGBE_FCTRL_MPE;
			fctrl &= ~IXGBE_FCTRL_UPE;
		} else {
			fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		}
		vlnctrl |= IXGBE_VLNCTRL_VFE;
		hw->addr_ctrl.user_set_promisc = 0;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);

	/* reprogram secondary unicast list */
	addr_count = netdev->uc_count;
	if (addr_count)
		addr_list = netdev->uc_list->dmi_addr;
	hw->mac.ops.update_uc_addr_list(hw, addr_list, addr_count,
	                                ixgbe_addr_list_itr);

	/* reprogram multicast list */
	addr_count = netdev->mc_count;
	if (addr_count)
		addr_list = netdev->mc_list->dmi_addr;
	hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
	                                ixgbe_addr_list_itr);
}

static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
{
	int q_idx;
	struct ixgbe_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* legacy and MSI only use one vector */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct napi_struct *napi;
		q_vector = &adapter->q_vector[q_idx];
		if (!q_vector->rxr_count)
			continue;
		napi = &q_vector->napi;
		if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) &&
		    (q_vector->rxr_count > 1))
			napi->poll = &ixgbe_clean_rxonly_many;

		napi_enable(napi);
	}
}

static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
{
	int q_idx;
	struct ixgbe_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* legacy and MSI only use one vector */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = &adapter->q_vector[q_idx];
		if (!q_vector->rxr_count)
			continue;
		napi_disable(&q_vector->napi);
	}
}

static void ixgbe_configure(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	ixgbe_set_rx_mode(netdev);

	ixgbe_restore_vlan(adapter);

	ixgbe_configure_tx(adapter);
	ixgbe_configure_rx(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
		                       (adapter->rx_ring[i].count - 1));
}

static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j = 0;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	u32 txdctl, rxdctl, mhadd;
	u32 gpie;

	ixgbe_get_hw_control(adapter);

	if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ||
	    (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
			gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
			        IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
		} else {
			/* MSI only */
			gpie = 0;
		}
		/* XXX: to interrupt immediately for EICS writes, enable this */
		/* gpie |= IXGBE_GPIE_EIMEN; */
		IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
	}

	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/* legacy interrupts, use EIAM to auto-mask when reading EICR,
		 * specifically only auto mask tx and rx interrupts */
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;

		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
		txdctl |= (8 << 16);
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		j = adapter->rx_ring[i].reg_idx;
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
		/* enable PTHRESH=32 descriptors (half the internal cache)
		 * and HTHRESH=0 descriptors (to minimize latency on fetch),
		 * this also removes a pesky rx_no_buffer_count increment */
		rxdctl |= 0x0020;
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl);
	}
	/* enable all receives */
	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxdctl);

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		ixgbe_configure_msix(adapter);
	else
		ixgbe_configure_msi_and_legacy(adapter);

	clear_bit(__IXGBE_DOWN, &adapter->state);
	ixgbe_napi_enable_all(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_EICR);

	ixgbe_irq_enable(adapter);

	/* bring the link up in the watchdog, this could race with our first
	 * link up interrupt but shouldn't be a problem */
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	mod_timer(&adapter->watchdog_timer, jiffies);
	return 0;
}

void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		msleep(1);
	ixgbe_down(adapter);
	ixgbe_up(adapter);
	clear_bit(__IXGBE_RESETTING, &adapter->state);
}

int ixgbe_up(struct ixgbe_adapter *adapter)
{
	/* hardware has been reset, we need to reload some things */
	ixgbe_configure(adapter);

	return ixgbe_up_complete(adapter);
}
void ixgbe_reset(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if (hw->mac.ops.init_hw(hw))
		dev_err(&adapter->pdev->dev, "Hardware Error\n");

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
}
/**
 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbe_rx_buffer *rx_buffer_info;

		rx_buffer_info = &rx_ring->rx_buffer_info[i];
		if (rx_buffer_info->dma) {
			pci_unmap_single(pdev, rx_buffer_info->dma,
			                 rx_ring->rx_buf_len,
			                 PCI_DMA_FROMDEVICE);
			rx_buffer_info->dma = 0;
		}
		if (rx_buffer_info->skb) {
			dev_kfree_skb(rx_buffer_info->skb);
			rx_buffer_info->skb = NULL;
		}
		if (!rx_buffer_info->page)
			continue;
		pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
		               PCI_DMA_FROMDEVICE);
		rx_buffer_info->page_dma = 0;
		put_page(rx_buffer_info->page);
		rx_buffer_info->page = NULL;
		rx_buffer_info->page_offset = 0;
	}

	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}
/**
 * ixgbe_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *tx_ring)
{
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
	}

	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->head);
	writel(0, adapter->hw.hw_addr + tx_ring->tail);
}
/**
 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}

/**
 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}
void ixgbe_down(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rxctrl;
	u32 txdctl;
	int i, j;

	/* signal that we are down to the interrupt handler */
	set_bit(__IXGBE_DOWN, &adapter->state);

	/* disable receives */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);

	netif_tx_disable(netdev);

	IXGBE_WRITE_FLUSH(hw);
	msleep(10);

	netif_tx_stop_all_queues(netdev);

	ixgbe_irq_disable(adapter);

	ixgbe_napi_disable_all(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	cancel_work_sync(&adapter->watchdog_task);

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
		                (txdctl & ~IXGBE_TXDCTL_ENABLE));
	}

	netif_carrier_off(netdev);

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
		dca_remove_requester(&adapter->pdev->dev);
	}
#endif

	if (!pci_channel_offline(adapter->pdev))
		ixgbe_reset(adapter);
	ixgbe_clean_all_tx_rings(adapter);
	ixgbe_clean_all_rx_rings(adapter);

#ifdef CONFIG_IXGBE_DCA
	/* since we reset the hardware DCA settings were cleared */
	if (dca_add_requester(&adapter->pdev->dev) == 0) {
		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
		/* always use CB2 mode, difference is masked
		 * in the CB driver */
		IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
		ixgbe_setup_dca(adapter);
	}
#endif
}
/**
 * ixgbe_poll - NAPI Rx polling callback
 * @napi: structure for representing this polling device
 * @budget: how many packets driver is allowed to clean
 *
 * This function is used for legacy and MSI, NAPI mode
 **/
static int ixgbe_poll(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector = container_of(napi,
	                                  struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	int tx_cleaned, work_done = 0;

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		ixgbe_update_tx_dca(adapter, adapter->tx_ring);
		ixgbe_update_rx_dca(adapter, adapter->rx_ring);
	}
#endif

	tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
	ixgbe_clean_rx_irq(adapter, adapter->rx_ring, &work_done, budget);

	if (tx_cleaned)
		work_done = budget;

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		netif_rx_complete(adapter->netdev, napi);
		if (adapter->itr_setting & 3)
			ixgbe_set_itr(adapter);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable(adapter);
	}

	return work_done;
}
/**
 * ixgbe_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbe_tx_timeout(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->reset_task);
}
static void ixgbe_reset_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter;
	adapter = container_of(work, struct ixgbe_adapter, reset_task);

	adapter->tx_timeout_count++;

	ixgbe_reinit_locked(adapter);
}
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	int nrq = 1, ntq = 1;
	int feature_mask = 0, rss_i, rss_m;

	/* Number of supported queues */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		rss_i = adapter->ring_feature[RING_F_RSS].indices;
		rss_m = 0;
		feature_mask |= IXGBE_FLAG_RSS_ENABLED;

		switch (adapter->flags & feature_mask) {
		case (IXGBE_FLAG_RSS_ENABLED):
			rss_m = 0xF;
			nrq = rss_i;
			ntq = rss_i;
			break;
		case 0:
		default:
			rss_i = 0;
			rss_m = 0;
			nrq = 1;
			ntq = 1;
			break;
		}

		adapter->ring_feature[RING_F_RSS].indices = rss_i;
		adapter->ring_feature[RING_F_RSS].mask = rss_m;
		break;
	default:
		nrq = 1;
		ntq = 1;
		break;
	}

	adapter->num_rx_queues = nrq;
	adapter->num_tx_queues = ntq;
}
static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
                                       int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 * 3) Other (Link Status Change, etc.)
	 * 4) TCP Timer (optional)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
		                      vectors);
		if (!err) /* Success in acquiring all requested vectors. */
			break;
		else if (err < 0)
			vectors = 0; /* Nasty failure, quit now */
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold) {
		/* Can't allocate enough MSI-X interrupts?  Oh well.
		 * This just means we'll go with either a single MSI
		 * vector or fall back to legacy interrupts.
		 */
		DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n");
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
		ixgbe_set_num_queues(adapter);
	} else {
		adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
		adapter->num_msix_vectors = vectors;
	}
}
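/*
 * The loop above relies on the old pci_enable_msix() contract: a positive
 * return value is the number of vectors the system could have granted, so
 * the caller retries with that smaller count.  A minimal standalone sketch
 * of the same negotiation pattern (hypothetical "foo" driver, illustration
 * only):
 *
 *	static int foo_acquire_vectors(struct pci_dev *pdev,
 *	                               struct msix_entry *entries,
 *	                               int want, int min)
 *	{
 *		while (want >= min) {
 *			int err = pci_enable_msix(pdev, entries, want);
 *			if (err == 0)
 *				return want;    // got everything we asked for
 *			if (err < 0)
 *				return err;     // hard failure, give up
 *			want = err;             // retry with what's available
 *		}
 *		return -ENOSPC;                 // can't meet the minimum
 *	}
 */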
/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	int feature_mask = 0, rss_i;
	int i, txr_idx, rxr_idx;

	/* Number of supported queues */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		rss_i = adapter->ring_feature[RING_F_RSS].indices;
		txr_idx = 0;
		rxr_idx = 0;
		feature_mask |= IXGBE_FLAG_RSS_ENABLED;
		switch (adapter->flags & feature_mask) {
		case (IXGBE_FLAG_RSS_ENABLED):
			for (i = 0; i < adapter->num_rx_queues; i++)
				adapter->rx_ring[i].reg_idx = i;
			for (i = 0; i < adapter->num_tx_queues; i++)
				adapter->tx_ring[i].reg_idx = i;
			break;
		case 0:
		default:
			break;
		}
		break;
	default:
		break;
	}
}
/**
 * ixgbe_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
	                           sizeof(struct ixgbe_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err_tx_ring_allocation;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
	                           sizeof(struct ixgbe_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err_rx_ring_allocation;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].count = adapter->tx_ring_count;
		adapter->tx_ring[i].queue_index = i;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].count = adapter->rx_ring_count;
		adapter->rx_ring[i].queue_index = i;
	}

	ixgbe_cache_ring_register(adapter);

	return 0;

err_rx_ring_allocation:
	kfree(adapter->tx_ring);
err_tx_ring_allocation:
	return -ENOMEM;
}
/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	int err = 0;
	int vector, v_budget;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) twice the number of vectors as there are CPU's.
	 */
	v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
	               (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;

	/*
	 * At the same time, hardware can only support a maximum of
	 * MAX_MSIX_COUNT vectors.  With features such as RSS and VMDq,
	 * we can easily reach upwards of 64 Rx descriptor queues and
	 * 32 Tx queues.  Thus, we cap it off in those rare cases where
	 * the cpu count also exceeds our vector limit.
	 */
	v_budget = min(v_budget, MAX_MSIX_COUNT);

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
	                                sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
		ixgbe_set_num_queues(adapter);
		kfree(adapter->tx_ring);
		kfree(adapter->rx_ring);
		err = ixgbe_alloc_queues(adapter);
		if (err) {
			DPRINTK(PROBE, ERR, "Unable to allocate memory "
			        "for queues\n");
			goto out;
		}

		goto try_msi;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	ixgbe_acquire_msix_vectors(adapter, v_budget);

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		goto out;

try_msi:
	err = pci_enable_msi(adapter->pdev);
	if (!err) {
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
	} else {
		DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
		        "falling back to legacy.  Error: %d\n", err);
		/* reset err */
		err = 0;
	}

out:
	/* Notify the stack of the (possibly) reduced Tx Queue count. */
	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;

	return err;
}
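/*
 * Worked example (illustrative): on an 8-CPU machine with 8 Rx and 8 Tx
 * queues, v_budget = min(8 + 8, 8 * 2) + NON_Q_VECTORS
 *                  = 16 + NON_Q_VECTORS,
 * then capped at MAX_MSIX_COUNT.  On a 2-CPU machine with the same queue
 * counts it is min(16, 4) + NON_Q_VECTORS = 4 + NON_Q_VECTORS, so the
 * vector request stays proportional to the CPUs available to service them.
 */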
static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}
/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
static int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbe_set_num_queues(adapter);

	err = ixgbe_alloc_queues(adapter);
	if (err) {
		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = ixgbe_set_interrupt_capability(adapter);
	if (err) {
		DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
	        "Tx Queue count = %u\n",
	        (adapter->num_rx_queues > 1) ? "Enabled" :
	        "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_set_interrupt:
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_alloc_queues:
	return err;
}
/**
 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbe_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int rss;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Set capability flags */
	rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
	adapter->ring_feature[RING_F_RSS].indices = rss;
	adapter->flags |= IXGBE_FLAG_RSS_ENABLED;

	/* default flow control settings */
	hw->fc.original_type = ixgbe_fc_none;
	hw->fc.type = ixgbe_fc_none;
	hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
	hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
	hw->fc.send_xon = true;

	/* select 10G link by default */
	hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;

	/* enable itr by default in dynamic mode */
	adapter->itr_setting = 1;
	adapter->eitr_param = 20000;

	/* set defaults for eitr in MegaBytes */
	adapter->eitr_low = 10;
	adapter->eitr_high = 20;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBE_DEFAULT_RXD;

	/* initialize eeprom parameters */
	if (ixgbe_init_eeprom_params_generic(hw)) {
		dev_err(&pdev->dev, "EEPROM initialization failed\n");
		return -EIO;
	}

	/* enable rx csum by default */
	adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;
}
/**
 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vmalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;
	memset(tx_ring->tx_buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc) +
	                sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
	                                     &tx_ring->dma);
	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->work_limit = tx_ring->count;
	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit "
	        "descriptor ring\n");
	return -ENOMEM;
}
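/*
 * Worked example (illustrative; the 512-descriptor ring size is assumed,
 * not the driver default): each advanced Tx descriptor is 16 bytes, so
 * 512 * 16 + 4 = 8196 bytes, and ALIGN(8196, 4096) rounds that up to
 * 12288, i.e. three whole 4K pages.  For a power-of-two alignment a,
 * ALIGN(x, a) is simply (x + a - 1) & ~(a - 1).
 */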
/**
 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (!err)
			continue;
		DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}
/**
 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct net_lro_desc) * IXGBE_MAX_LRO_DESCRIPTORS;
	rx_ring->lro_mgr.lro_arr = vmalloc(size);
	if (!rx_ring->lro_mgr.lro_arr)
		return -ENOMEM;
	memset(rx_ring->lro_mgr.lro_arr, 0, size);

	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vmalloc(size);
	if (!rx_ring->rx_buffer_info) {
		DPRINTK(PROBE, ERR,
		        "vmalloc allocation failed for the rx desc ring\n");
		goto alloc_failed;
	}
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma);

	if (!rx_ring->desc) {
		DPRINTK(PROBE, ERR,
		        "Memory allocation failed for the rx desc ring\n");
		vfree(rx_ring->rx_buffer_info);
		goto alloc_failed;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

alloc_failed:
	vfree(rx_ring->lro_mgr.lro_arr);
	rx_ring->lro_mgr.lro_arr = NULL;
	return -ENOMEM;
}
/**
 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (!err)
			continue;
		DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
		break;
	}

	return err;
}
/**
 * ixgbe_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbe_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}
/**
 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
}
/**
 * ixgbe_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	vfree(rx_ring->lro_mgr.lro_arr);
	rx_ring->lro_mgr.lro_arr = NULL;

	ixgbe_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}
/**
 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
}
/**
 * ixgbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
		return -EINVAL;

	DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
	        netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}
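/*
 * Worked example (illustrative): a requested MTU of 9000 gives
 * max_frame = 9000 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) = 9018 bytes on the
 * wire, which is accepted as long as it does not exceed
 * IXGBE_MAX_JUMBO_FRAME_SIZE; a request of 60 is rejected by the
 * new_mtu < 68 check above before any hardware is touched.
 */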
/**
 * ixgbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbe_open(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int err;

	/* disallow open during test */
	if (test_bit(__IXGBE_TESTING, &adapter->state))
		return -EBUSY;

	/* allocate transmit descriptors */
	err = ixgbe_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbe_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbe_configure(adapter);

	err = ixgbe_request_irq(adapter);
	if (err)
		goto err_req_irq;

	err = ixgbe_up_complete(adapter);
	if (err)
		goto err_up;

	netif_tx_start_all_queues(netdev);

	return 0;

err_up:
	ixgbe_release_hw_control(adapter);
	ixgbe_free_irq(adapter);
err_req_irq:
	ixgbe_free_all_rx_resources(adapter);
err_setup_rx:
	ixgbe_free_all_tx_resources(adapter);
err_setup_tx:
	ixgbe_reset(adapter);

	return err;
}
/**
 * ixgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbe_close(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	ixgbe_down(adapter);
	ixgbe_free_irq(adapter);

	ixgbe_free_all_tx_resources(adapter);
	ixgbe_free_all_rx_resources(adapter);

	ixgbe_release_hw_control(adapter);

	return 0;
}
/**
 * ixgbe_napi_add_all - prep napi structs for use
 * @adapter: private struct
 * helper function to napi_add each possible q_vector->napi
 */
static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
{
	int q_idx, q_vectors;
	int (*poll)(struct napi_struct *, int);

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		poll = &ixgbe_clean_rxonly;
		/* Only enable as many vectors as we have rx queues. */
		q_vectors = adapter->num_rx_queues;
	} else {
		poll = &ixgbe_poll;
		/* only one q_vector for legacy modes */
		q_vectors = 1;
	}

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx];
		netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
	}
}
static void ixgbe_napi_del_all(struct ixgbe_adapter *adapter)
{
	int q_idx;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* legacy and MSI only use one vector */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx];
		if (!q_vector->rxr_count)
			continue;
		netif_napi_del(&q_vector->napi);
	}
}
#ifdef CONFIG_PM
static int ixgbe_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
		       "suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err) {
		printk(KERN_ERR "ixgbe: Cannot initialize interrupts for "
		       "device\n");
		return err;
	}

	ixgbe_napi_add_all(adapter);
	ixgbe_reset(adapter);

	if (netif_running(netdev)) {
		err = ixgbe_open(adapter->netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}
#endif /* CONFIG_PM */

static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		ixgbe_down(adapter);
		ixgbe_free_irq(adapter);
		ixgbe_free_all_tx_resources(adapter);
		ixgbe_free_all_rx_resources(adapter);
	}
	ixgbe_reset_interrupt_capability(adapter);
	ixgbe_napi_del_all(adapter);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	ixgbe_release_hw_control(adapter);

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
static void ixgbe_shutdown(struct pci_dev *pdev)
{
	ixgbe_suspend(pdev, PMSG_SUSPEND);
}
/**
 * ixgbe_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbe_update_stats(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 total_mpc = 0;
	u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;

	adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	for (i = 0; i < 8; i++) {
		/* for packet buffers not used, the register should read 0 */
		mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		missed_rx += mpc;
		adapter->stats.mpc[i] += mpc;
		total_mpc += adapter->stats.mpc[i];
		adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	}
	adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	/* work around hardware counting issue */
	adapter->stats.gprc -= missed_rx;

	/* 82598 hardware only has a 32 bit counter in the high register */
	adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
	adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
	adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	adapter->stats.bprc += bprc;
	adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	adapter->stats.mprc -= bprc;
	adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
	adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
	adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
	adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	adapter->stats.lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	adapter->stats.lxofftxc += lxoff;
	adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	/*
	 * 82598 errata - tx of flow control packets is included in tx counters
	 */
	xon_off_tot = lxon + lxoff;
	adapter->stats.gptc -= xon_off_tot;
	adapter->stats.mptc -= xon_off_tot;
	adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
	adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	adapter->stats.ptc64 -= xon_off_tot;
	adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);

	/* Fill out the OS statistics structure */
	adapter->net_stats.multicast = adapter->stats.mprc;

	/* Rx Errors */
	adapter->net_stats.rx_errors = adapter->stats.crcerrs +
	                               adapter->stats.rlec;
	adapter->net_stats.rx_dropped = 0;
	adapter->net_stats.rx_length_errors = adapter->stats.rlec;
	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
	adapter->net_stats.rx_missed_errors = total_mpc;
}
/**
 * ixgbe_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbe_watchdog(unsigned long data)
{
	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
	struct ixgbe_hw *hw = &adapter->hw;

	/* Do the watchdog outside of interrupt context due to the lovely
	 * delays that some of the newer hardware requires */
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		/* Cause software interrupt to ensure rx rings are cleaned */
		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
			u32 eics =
			 (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1;
			IXGBE_WRITE_REG(hw, IXGBE_EICS, eics);
		} else {
			/* For legacy and MSI interrupts don't set any bits that
			 * are enabled for EIAM, because this operation would
			 * set *both* EIMS and EICS for any bit in EIAM */
			IXGBE_WRITE_REG(hw, IXGBE_EICS,
			                (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
		}
		/* Reset the timer */
		mod_timer(&adapter->watchdog_timer,
		          round_jiffies(jiffies + 2 * HZ));
	}

	schedule_work(&adapter->watchdog_task);
}
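/*
 * Worked example (illustrative; the NON_Q_VECTORS value of 2 is assumed
 * here, not taken from the header): with 18 MSI-X vectors acquired,
 * (1 << (18 - 2)) - 1 = 0xFFFF sets the low 16 EICS bits, i.e. one
 * software-triggered interrupt per queue vector, so any stale descriptors
 * get cleaned even if a hardware interrupt was missed.
 */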
/**
 * ixgbe_watchdog_task - worker thread to bring link up
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_watchdog_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
	                                             struct ixgbe_adapter,
	                                             watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;

	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;

	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
		if (link_up ||
		    time_after(jiffies, (adapter->link_check_timeout +
		                         IXGBE_TRY_LINK_TIMEOUT))) {
			IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
			adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
		}
		adapter->link_up = link_up;
		adapter->link_speed = link_speed;
	}

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
#define FLOW_RX (frctl & IXGBE_FCTRL_RFCE)
#define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X)
			DPRINTK(LINK, INFO, "NIC Link is Up %s, "
			        "Flow Control: %s\n",
			        (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
			         "10 Gbps" :
			         (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
			          "1 Gbps" : "unknown speed")),
			        ((FLOW_RX && FLOW_TX) ? "RX/TX" :
			         (FLOW_RX ? "RX" :
			         (FLOW_TX ? "TX" : "None"))));

			netif_carrier_on(netdev);
			netif_tx_wake_all_queues(netdev);
		} else {
			/* Force detection of hung controller */
			adapter->detect_tx_hung = true;
		}
	} else {
		adapter->link_up = false;
		adapter->link_speed = 0;
		if (netif_carrier_ok(netdev)) {
			DPRINTK(LINK, INFO, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
	}

	ixgbe_update_stats(adapter);
	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
}
static int ixgbe_tso(struct ixgbe_adapter *adapter,
                     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
                     u32 tx_flags, u8 *hdr_len)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl;
	u32 mss_l4len_idx, l4len;

	if (skb_is_gso(skb)) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}
		l4len = tcp_hdrlen(skb);
		*hdr_len += l4len;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
			                                         iph->daddr, 0,
			                                         IPPROTO_TCP,
			                                         0);
			adapter->hw_tso_ctxt++;
		} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			                     &ipv6_hdr(skb)->daddr,
			                     0, IPPROTO_TCP, 0);
			adapter->hw_tso6_ctxt++;
		}

		i = tx_ring->next_to_use;

		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		/* VLAN MACLEN IPLEN */
		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= ((skb_network_offset(skb)) <<
		                    IXGBE_ADVTXD_MACLEN_SHIFT);
		*hdr_len += skb_network_offset(skb);
		vlan_macip_lens |=
		    (skb_transport_header(skb) - skb_network_header(skb));
		*hdr_len +=
		    (skb_transport_header(skb) - skb_network_header(skb));
		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
		type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
		                   IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->protocol == htons(ETH_P_IP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);

		/* MSS L4LEN IDX */
		mss_l4len_idx =
		    (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
		mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
		/* use index 1 for TSO */
		mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}
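/*
 * Note on the checksum seeding above (explanatory, not original code):
 * for TSO the stack's TCP checksum is replaced with the checksum of a
 * pseudo-header whose length field is zero, because the hardware inserts
 * the per-segment length as it splits the payload.
 * csum_tcpudp_magic(saddr, daddr, 0, IPPROTO_TCP, 0) computes exactly
 * that IPv4 pseudo-header sum (csum_ipv6_magic is the IPv6 analogue), and
 * the bitwise ~ stores it in the unfolded form the offload engine expects
 * to continue from.
 */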
static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
                          struct ixgbe_ring *tx_ring,
                          struct sk_buff *skb, u32 tx_flags)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= (skb_network_offset(skb) <<
		                    IXGBE_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			vlan_macip_lens |= (skb_transport_header(skb) -
			                    skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
		                    IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			switch (skb->protocol) {
			case __constant_htons(ETH_P_IP):
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					type_tucmd_mlhl |=
					        IXGBE_ADVTXD_TUCMD_L4T_TCP;
				break;
			case __constant_htons(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					type_tucmd_mlhl |=
					        IXGBE_ADVTXD_TUCMD_L4T_TCP;
				break;
			default:
				if (unlikely(net_ratelimit())) {
					DPRINTK(PROBE, WARNING,
					        "partial checksum but proto=%x!\n",
					        skb->protocol);
				}
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
		/* use index zero for tx checksum offload */
		context_desc->mss_l4len_idx = 0;

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		adapter->hw_csum_tx_good++;
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}
static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                        struct ixgbe_ring *tx_ring,
                        struct sk_buff *skb, unsigned int first)
{
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int len = skb->len;
	unsigned int offset = 0, size, count = 0, i;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;

	len -= skb->data_len;

	i = tx_ring->next_to_use;

	while (len) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);

		tx_buffer_info->length = size;
		tx_buffer_info->dma = pci_map_single(adapter->pdev,
		                                     skb->data + offset,
		                                     size, PCI_DMA_TODEVICE);
		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		len -= size;
		offset += size;
		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;
		offset = frag->page_offset;

		while (len) {
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);

			tx_buffer_info->length = size;
			tx_buffer_info->dma = pci_map_page(adapter->pdev,
			                                   frag->page,
			                                   offset, size,
			                                   PCI_DMA_TODEVICE);
			tx_buffer_info->time_stamp = jiffies;
			tx_buffer_info->next_to_watch = i;

			len -= size;
			offset += size;
			count++;
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
	}
	if (i == 0)
		i = tx_ring->count - 1;
	else
		i = i - 1;
	tx_ring->tx_buffer_info[i].skb = skb;
	tx_ring->tx_buffer_info[first].next_to_watch = i;

	return count;
}
static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
                           struct ixgbe_ring *tx_ring,
                           int tx_flags, int count, u32 paylen, u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	unsigned int i;
	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;

	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;

	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
		                 IXGBE_ADVTXD_POPTS_SHIFT;

		/* use index 1 context for tso */
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
			                 IXGBE_ADVTXD_POPTS_SHIFT;

	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
		                 IXGBE_ADVTXD_POPTS_SHIFT;

	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
		        cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
}
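/*
 * The wmb() + writel() pair above is the canonical descriptor-ring
 * doorbell pattern.  A minimal sketch of the producer side (hypothetical
 * ring, names illustrative):
 *
 *	desc[i].addr = cpu_to_le64(dma);        // fill the descriptor
 *	desc[i].cmd  = cpu_to_le32(cmd | len);
 *	wmb();                                  // descriptors visible first
 *	ring->next_to_use = next;
 *	writel(next, ring->tail_reg);           // then ring the doorbell
 *
 * Without the barrier, a weakly-ordered CPU could let the tail write reach
 * the device before the descriptor contents do, and the hardware would
 * fetch stale descriptors.
 */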
static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
                                 struct ixgbe_ring *tx_ring, int size)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	netif_stop_subqueue(netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(netdev, tx_ring->queue_index);
	++adapter->restart_queue;
	return 0;
}
static int ixgbe_maybe_stop_tx(struct net_device *netdev,
                               struct ixgbe_ring *tx_ring, int size)
{
	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
}
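/*
 * The stop/re-check/restart dance in __ixgbe_maybe_stop_tx() is the
 * standard lock-free flow control for multiqueue netdevs.  The ordering
 * that makes it safe (sketch, names illustrative):
 *
 *	producer (xmit path)            consumer (tx clean path)
 *	--------------------            ------------------------
 *	stop queue                      free descriptors
 *	smp_mb()                        smp_mb()
 *	re-check free space             if queue stopped and space free:
 *	if space: restart queue                 wake queue
 *
 * Because each side re-checks the other's state after its barrier, at
 * least one of them always observes the condition that restarts the
 * queue, so the queue can never stay stopped with room available.
 */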
static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring;
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int r_idx = 0, tso;
	int count = 0;
	unsigned int f;

	r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping;
	tx_ring = &adapter->tx_ring[r_idx];

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}
	/* three things can cause us to need a context descriptor */
	if (skb_is_gso(skb) ||
	    (skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN))
		count++;

	count += TXD_USE_COUNT(skb_headlen(skb));
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
		adapter->tx_busy++;
		return NETDEV_TX_BUSY;
	}

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IXGBE_TX_FLAGS_IPV4;
	first = tx_ring->next_to_use;
	tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IXGBE_TX_FLAGS_TSO;
	else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
	         (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IXGBE_TX_FLAGS_CSUM;

	ixgbe_tx_queue(adapter, tx_ring, tx_flags,
	               ixgbe_tx_map(adapter, tx_ring, skb, first),
	               skb->len, hdr_len);

	netdev->trans_start = jiffies;

	ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;
}
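/*
 * Worked example (illustrative; it assumes TXD_USE_COUNT(size) rounds up
 * to the number of IXGBE_MAX_DATA_PER_TXD-sized descriptors a buffer
 * needs): an skb with a 1 KB linear area and two 1.5 KB page fragments
 * budgets 1 (possible context descriptor) + 1 (head) + 2 (frags) = 4
 * descriptors.  ixgbe_maybe_stop_tx() stops the queue up front if the
 * ring cannot hold all of them, which is why ixgbe_tx_map() below can
 * walk the ring without any further bounds checking.
 */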
/**
 * ixgbe_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* only return the current stats */
	return &adapter->net_stats;
}
/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ixgbe_netpoll(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
	ixgbe_intr(adapter->pdev->irq, netdev);
	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
	enable_irq(adapter->pdev->irq);
}
#endif
/**
 * ixgbe_link_config - set up initial link with default speed and duplex
 * @hw: pointer to private hardware struct
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_link_config(struct ixgbe_hw *hw)
{
	u32 autoneg = IXGBE_LINK_SPEED_10GB_FULL;

	/* must always autoneg for both 1G and 10G link */
	hw->mac.autoneg = true;

	return hw->mac.ops.setup_link_speed(hw, autoneg, true, true);
}
/**
 * ixgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit ixgbe_probe(struct pci_dev *pdev,
                                 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbe_adapter *adapter = NULL;
	struct ixgbe_hw *hw;
	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
	static int cards_found;
	int i, err, pci_using_dac;
	u16 link_status, link_speed, link_width;
	u32 part_num, eec;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
				        "configuration, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, ixgbe_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
	                      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	for (i = 1; i <= 5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
	}

	netdev->open = &ixgbe_open;
	netdev->stop = &ixgbe_close;
	netdev->hard_start_xmit = &ixgbe_xmit_frame;
	netdev->get_stats = &ixgbe_get_stats;
	netdev->set_rx_mode = &ixgbe_set_rx_mode;
	netdev->set_multicast_list = &ixgbe_set_rx_mode;
	netdev->set_mac_address = &ixgbe_set_mac;
	netdev->change_mtu = &ixgbe_change_mtu;
	ixgbe_set_ethtool_ops(netdev);
	netdev->tx_timeout = &ixgbe_tx_timeout;
	netdev->watchdog_timeo = 5 * HZ;
	netdev->vlan_rx_register = ixgbe_vlan_rx_register;
	netdev->vlan_rx_add_vid = ixgbe_vlan_rx_add_vid;
	netdev->vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid;
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = ixgbe_netpoll;
#endif
	strcpy(netdev->name, pci_name(pdev));

	adapter->bd_number = cards_found;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	/* EEPROM */
	memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
	/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
	if (!(eec & (1 << 8)))
		hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;

	/* PHY */
	memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
	/* phy->sfp_type = ixgbe_sfp_type_unknown; */

	err = ii->get_invariants(hw);
	if (err)
		goto err_hw_init;

	/* setup the private structure */
	err = ixgbe_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* reset_hw fills in the perm_addr as well */
	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
		goto err_sw_init;
	}

	netdev->features = NETIF_F_SG |
	                   NETIF_F_IP_CSUM |
	                   NETIF_F_HW_VLAN_TX |
	                   NETIF_F_HW_VLAN_RX |
	                   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;
	netdev->features |= NETIF_F_LRO;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	/* make sure the EEPROM is good */
	if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
		dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);

	if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "invalid MAC address\n");
		err = -EIO;
		goto err_eeprom;
	}

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &ixgbe_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
	INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task);

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	/* print bus type/speed/width info */
	pci_read_config_word(pdev, IXGBE_PCI_LINK_STATUS, &link_status);
	link_speed = link_status & IXGBE_PCI_LINK_SPEED;
	link_width = link_status & IXGBE_PCI_LINK_WIDTH;
	dev_info(&pdev->dev, "(PCI Express:%s:%s) "
	         "%02x:%02x:%02x:%02x:%02x:%02x\n",
	        ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" :
	         (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" :
	         "Unknown"),
	        ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" :
	         (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" :
	         (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" :
	         (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" :
	         "Unknown"),
	        netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
	        netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
	ixgbe_read_pba_num_generic(hw, &part_num);
	dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
	         hw->mac.type, hw->phy.type,
	         (part_num >> 8), (part_num & 0xff));

	if (link_width <= IXGBE_PCI_LINK_WIDTH_4) {
		dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
		         "this card is not sufficient for optimal "
		         "performance.\n");
		dev_warn(&pdev->dev, "For optimal performance a x8 "
		         "PCI-Express slot is required.\n");
	}

	/* reset the hardware with the new settings */
	hw->mac.ops.start_hw(hw);

	/* link_config depends on start_hw being called at least once */
	err = ixgbe_link_config(hw);
	if (err) {
		dev_err(&pdev->dev, "setup_link_speed FAILED %d\n", err);
		goto err_register;
	}

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	ixgbe_napi_add_all(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

#ifdef CONFIG_IXGBE_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
		/* always use CB2 mode, difference is masked
		 * in the CB driver */
		IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
		ixgbe_setup_dca(adapter);
	}
#endif

	dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
	cards_found++;
	return 0;

err_register:
	ixgbe_release_hw_control(adapter);
err_hw_init:
err_sw_init:
	ixgbe_reset_interrupt_capability(adapter);
err_eeprom:
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbe_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBE_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);

	flush_scheduled_work();

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
		dca_remove_requester(&pdev->dev);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
	}
#endif

	unregister_netdev(netdev);

	ixgbe_reset_interrupt_capability(adapter);

	ixgbe_release_hw_control(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	DPRINTK(PROBE, INFO, "complete\n");
	ixgbe_napi_del_all(adapter);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev))
		ixgbe_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		DPRINTK(PROBE, ERR,
		        "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_restore_state(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	ixgbe_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void ixgbe_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (ixgbe_up(adapter)) {
			DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}
static struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};

static struct pci_driver ixgbe_driver = {
	.name     = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe    = ixgbe_probe,
	.remove   = __devexit_p(ixgbe_remove),
#ifdef CONFIG_PM
	.suspend  = ixgbe_suspend,
	.resume   = ixgbe_resume,
#endif
	.shutdown = ixgbe_shutdown,
	.err_handler = &ixgbe_err_handler
};
/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbe_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name,
	       ixgbe_driver_string, ixgbe_driver_version);

	printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);

#ifdef CONFIG_IXGBE_DCA
	dca_register_notify(&dca_notifier);
#endif

	ret = pci_register_driver(&ixgbe_driver);
	return ret;
}

module_init(ixgbe_init_module);
/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbe_exit_module(void)
{
#ifdef CONFIG_IXGBE_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&ixgbe_driver);
}

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
                            void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
	                                 __ixgbe_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IXGBE_DCA */

module_exit(ixgbe_exit_module);