/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2015 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, see <http://www.gnu.org/licenses/>.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"
const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.12.1-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf]	= &ixgbevf_82599_vf_info,
	[board_X540_vf]		= &ixgbevf_X540_vf_info,
	[board_X550_vf]		= &ixgbevf_X550_vf_info,
	[board_X550EM_x_vf]	= &ixgbevf_X550EM_x_vf_info,
};
/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter)
{
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
	    !test_bit(__IXGBEVF_REMOVING, &adapter->state) &&
	    !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state))
		schedule_work(&adapter->service_task);
}
static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
{
	BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
}
/* forward decls */
static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;

	if (!hw->hw_addr)
		return;
	hw->hw_addr = NULL;
	dev_err(&adapter->pdev->dev, "Adapter removed\n");
	if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
		ixgbevf_service_event_schedule(adapter);
}
static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
{
	u32 value;

	/* The following check not only optimizes a bit by not
	 * performing a read on the status register when the
	 * register just read was a status register read that
	 * returned IXGBE_FAILED_READ_REG. It also blocks any
	 * potential recursion.
	 */
	if (reg == IXGBE_VFSTATUS) {
		ixgbevf_remove_adapter(hw);
		return;
	}
	value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
	if (value == IXGBE_FAILED_READ_REG)
		ixgbevf_remove_adapter(hw);
}
u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
{
	u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
	u32 value;

	if (IXGBE_REMOVED(reg_addr))
		return IXGBE_FAILED_READ_REG;
	value = readl(reg_addr + reg);
	if (unlikely(value == IXGBE_FAILED_READ_REG))
		ixgbevf_check_remove(hw, reg);
	return value;
}
/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 **/
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;

	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* Tx or Rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}
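
/* A note on the IVAR layout, derived from the register math above: each
 * 32-bit VTIVAR register holds four 8-bit entries covering the Rx and Tx
 * causes of two queues, so index = 16 * (queue & 1) + 8 * direction selects
 * one byte within VTIVAR(queue >> 1), and IXGBE_IVAR_ALLOC_VAL marks that
 * entry as valid.  For example, the Tx cause of queue 3 lands in bits 31:24
 * of VTIVAR(1).
 */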
static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
					       struct ixgbevf_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(tx_ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(tx_ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}
static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
{
	return ring->stats.packets;
}
static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring)
{
	struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx));
	u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx));

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}
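
/* Head/tail arithmetic above: TDH chases TDT around the ring, so when the
 * head has wrapped past the tail the in-flight count is the modular
 * distance tail + count - head.  E.g. with count = 512, head = 500 and
 * tail = 10 there are 22 descriptors still pending.
 */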
static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring)
{
	u32 tx_done = ixgbevf_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = ixgbevf_get_tx_pending(tx_ring);

	clear_check_for_tx_hang(tx_ring);

	/* Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. The
	 * ARMED bit is set to indicate a potential hang.
	 */
	if ((tx_done_old == tx_done) && tx_pending) {
		/* make sure it is true for two checks in a row */
		return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED,
					&tx_ring->state);
	}
	/* reset the countdown */
	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state);

	/* update completed stats and continue */
	tx_ring->tx_stats.tx_done_old = tx_done;

	return false;
}
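
/* The ARMED bit implements a two-sample hang detector: the first pass that
 * sees no new completions (with work still pending) arms the bit and
 * returns false, and only a second consecutive pass in the same state
 * reports a hang.  Any progress in between disarms the countdown.
 */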
static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
		adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
		ixgbevf_service_event_schedule(adapter);
	}
}
/**
 * ixgbevf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbevf_tx_timeout(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ixgbevf_tx_timeout_reset(adapter);
}
/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = tx_ring->count / 2;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buffer->skb = NULL;
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
		struct ixgbe_hw *hw = &adapter->hw;
		union ixgbe_adv_tx_desc *eop_desc;

		eop_desc = tx_ring->tx_buffer_info[i].next_to_watch;

		pr_err("Detected Tx Unit Hang\n"
		       "  Tx Queue             <%d>\n"
		       "  TDH, TDT             <%x>, <%x>\n"
		       "  next_to_use          <%x>\n"
		       "  next_to_clean        <%x>\n"
		       "tx_buffer_info[next_to_clean]\n"
		       "  next_to_watch        <%p>\n"
		       "  eop_desc->wb.status  <%x>\n"
		       "  time_stamp           <%lx>\n"
		       "  jiffies              <%lx>\n",
		       tx_ring->queue_index,
		       IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)),
		       IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)),
		       tx_ring->next_to_use, i,
		       eop_desc, (eop_desc ? eop_desc->wb.status : 0),
		       tx_ring->tx_buffer_info[i].time_stamp, jiffies);

		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

		/* schedule immediate reset if we believe we hung */
		ixgbevf_tx_timeout_reset(adapter);

		return true;
	}

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}
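
/* Loop bookkeeping above: i is biased by -tx_ring->count so the wrap test
 * on each increment is simply "if (unlikely(!i))" rather than a compare
 * against the ring size; the bias is removed again before next_to_clean is
 * written back.
 */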
/**
 * ixgbevf_rx_skb - Helper function to determine proper Rx method
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/
static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
			   struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	skb_mark_napi_id(skb, &q_vector->napi);

	if (ixgbevf_qv_busy_polling(q_vector)) {
		netif_receive_skb(skb);
		/* exit early if we busy polled */
		return;
	}
#endif /* CONFIG_NET_RX_BUSY_POLL */

	napi_gro_receive(&q_vector->napi, skb);
}
/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
				       union ixgbe_adv_rx_desc *rx_desc,
				       struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP and error */
	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
	    ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
		return;

	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}
/**
 * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the checksum, VLAN, protocol, and other fields within
 * the skb.
 **/
static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
				       union ixgbe_adv_rx_desc *rx_desc,
				       struct sk_buff *skb)
{
	ixgbevf_rx_checksum(rx_ring, rx_desc, skb);

	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
		u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
		unsigned long *active_vlans = netdev_priv(rx_ring->netdev);

		if (test_bit(vid & VLAN_VID_MASK, active_vlans))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}

	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
/**
 * ixgbevf_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
			       union ixgbe_adv_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IXGBEVF_RX_DESC(rx_ring, ntc));

	if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
		return false;

	return true;
}
static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
				      struct ixgbevf_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma = bi->dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_page();
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0,
			   PAGE_SIZE, DMA_FROM_DEVICE);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);

		rx_ring->rx_stats.alloc_rx_buff_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;

	return true;
}
/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
 * @cleaned_count: number of buffers to replace
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
				     u16 cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	unsigned int i = rx_ring->next_to_use;

	/* nothing to do or no valid netdev defined */
	if (!cleaned_count || !rx_ring->netdev)
		return;

	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
			break;

		/* Refresh the desc even if pkt_addr didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the hdr_addr for the next_to_use descriptor */
		rx_desc->read.hdr_addr = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		ixgbevf_write_tail(rx_ring, i);
	}
}
/**
 * ixgbevf_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx Descriptors right.  These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
				    union ixgbe_adv_rx_desc *rx_desc,
				    struct sk_buff *skb)
{
	/* verify that the packet does not have any known errors */
	if (unlikely(ixgbevf_test_staterr(rx_desc,
					  IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
		struct net_device *netdev = rx_ring->netdev;

		if (!(netdev->features & NETIF_F_RXALL)) {
			dev_kfree_skb_any(skb);
			return true;
		}
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}
/**
 * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
				  struct ixgbevf_rx_buffer *old_buff)
{
	struct ixgbevf_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_buff->page = old_buff->page;
	new_buff->dma = old_buff->dma;
	new_buff->page_offset = old_buff->page_offset;

	/* sync the buffer for use by the device */
	dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
					 new_buff->page_offset,
					 IXGBEVF_RX_BUFSZ,
					 DMA_FROM_DEVICE);
}

static inline bool ixgbevf_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
}
/**
 * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the adapter.
 **/
static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
				struct ixgbevf_rx_buffer *rx_buffer,
				union ixgbe_adv_rx_desc *rx_desc,
				struct sk_buff *skb)
{
	struct page *page = rx_buffer->page;
	unsigned char *va = page_address(page) + rx_buffer->page_offset;
	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
	unsigned int truesize = IXGBEVF_RX_BUFSZ;
#else
	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
#endif
	unsigned int pull_len;

	if (unlikely(skb_is_nonlinear(skb)))
		goto add_tail_frag;

	if (likely(size <= IXGBEVF_RX_HDR_SIZE)) {
		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

		/* page is not reserved, we can reuse buffer as is */
		if (likely(!ixgbevf_page_is_reserved(page)))
			return true;

		/* this page cannot be reused so discard it */
		put_page(page);
		return false;
	}

	/* we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	va += pull_len;
	size -= pull_len;

add_tail_frag:
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			(unsigned long)va & ~PAGE_MASK, size, truesize);

	/* avoid re-using remote pages */
	if (unlikely(ixgbevf_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != 1))
		return false;

	/* flip page offset to other buffer */
	rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;
#else
	/* move offset up to the next cache line */
	rx_buffer->page_offset += truesize;

	if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
		return false;
#endif

	/* Even if we own the page, we are not allowed to use atomic_set()
	 * This would break get_page_unless_zero() users.
	 */
	atomic_inc(&page->_count);

	return true;
}
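
/* Page recycling above: with 4K pages each Rx page is split into two
 * IXGBEVF_RX_BUFSZ halves, and "page_offset ^= IXGBEVF_RX_BUFSZ" flips
 * between them, so a page owned solely by the driver alternates between the
 * half given to hardware and the half still referenced by the stack.  On
 * larger pages the offset instead walks forward in truesize steps until the
 * page is exhausted.
 */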
static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
					       union ixgbe_adv_rx_desc *rx_desc,
					       struct sk_buff *skb)
{
	struct ixgbevf_rx_buffer *rx_buffer;
	struct page *page;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	page = rx_buffer->page;
	prefetchw(page);

	if (likely(!skb)) {
		void *page_addr = page_address(page) +
				  rx_buffer->page_offset;

		/* prefetch first cache line of first page */
		prefetch(page_addr);
#if L1_CACHE_BYTES < 128
		prefetch(page_addr + L1_CACHE_BYTES);
#endif

		/* allocate a skb to store the frags */
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						IXGBEVF_RX_HDR_SIZE);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			return NULL;
		}

		/* we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);
	}

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      IXGBEVF_RX_BUFSZ,
				      DMA_FROM_DEVICE);

	/* pull page into skb */
	if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
		/* hand second half of page back to the ring */
		ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	}

	/* clear contents of buffer_info */
	rx_buffer->dma = 0;
	rx_buffer->page = NULL;

	return skb;
}
static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u32 qmask)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}
static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				struct ixgbevf_ring *rx_ring,
				int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
	struct sk_buff *skb = rx_ring->skb;

	while (likely(total_rx_packets < budget)) {
		union ixgbe_adv_rx_desc *rx_desc;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);

		if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * RXD_STAT_DD bit is set
		 */
		rmb();

		/* retrieve a buffer from the ring */
		skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb);

		/* exit if we failed to retrieve a buffer */
		if (!skb)
			break;

		cleaned_count++;

		/* fetch next buffer in frame if non-eop */
		if (ixgbevf_is_non_eop(rx_ring, rx_desc))
			continue;

		/* verify the packet layout is correct */
		if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* Workaround hardware that can't do proper VEPA multicast
		 * source pruning.
		 */
		if ((skb->pkt_type == PACKET_BROADCAST ||
		     skb->pkt_type == PACKET_MULTICAST) &&
		    ether_addr_equal(rx_ring->netdev->dev_addr,
				     eth_hdr(skb)->h_source)) {
			dev_kfree_skb_irq(skb);
			continue;
		}

		/* populate checksum, VLAN, and protocol */
		ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);

		ixgbevf_rx_skb(q_vector, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_rx_packets++;
	}

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}
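
/* rx_ring->skb carries a partially assembled frame across calls: when the
 * budget runs out, or the next fragment of a multi-descriptor frame has not
 * landed yet, the skb is parked on the ring and picked up by the next poll
 * instead of being dropped.
 */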
/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget;
	bool clean_complete = true;

	ixgbevf_for_each_ring(ring, q_vector->tx)
		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

#ifdef CONFIG_NET_RX_BUSY_POLL
	if (!ixgbevf_qv_lock_napi(q_vector))
		return budget;
#endif

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling
	 */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	ixgbevf_for_each_ring(ring, q_vector->rx)
		clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
							per_ring_budget)
				   < per_ring_budget);

#ifdef CONFIG_NET_RX_BUSY_POLL
	ixgbevf_qv_unlock_napi(q_vector);
#endif

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;
	/* all work done, exit the polling mode */
	napi_complete(napi);
	if (adapter->rx_itr_setting & 1)
		ixgbevf_set_itr(q_vector);
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
	    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
		ixgbevf_irq_enable_queues(adapter,
					  1 << q_vector->v_idx);

	return 0;
}
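
/* Budget handling above: the NAPI budget is split evenly across the Rx
 * rings of this vector but never below 1, since a zero per-ring budget
 * would terminate polling.  Returning the full budget tells NAPI to keep
 * polling; returning 0 after napi_complete() re-enables this vector's
 * interrupt via VTEIMS.
 */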
/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 **/
void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	/* set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}
#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
{
	struct ixgbevf_q_vector *q_vector =
			container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int found = 0;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return LL_FLUSH_FAILED;

	if (!ixgbevf_qv_lock_poll(q_vector))
		return LL_FLUSH_BUSY;

	ixgbevf_for_each_ring(ring, q_vector->rx) {
		found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
#ifdef BP_EXTENDED_STATS
		if (found)
			ring->stats.cleaned += found;
		else
			ring->stats.misses++;
#endif
		if (found)
			break;
	}

	ixgbevf_qv_unlock_poll(q_vector);

	return found;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	int q_vectors, v_idx;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	adapter->eims_enable_mask = 0;

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		struct ixgbevf_ring *ring;

		q_vector = adapter->q_vector[v_idx];

		ixgbevf_for_each_ring(ring, q_vector->rx)
			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbevf_for_each_ring(ring, q_vector->tx)
			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		if (q_vector->tx.ring && !q_vector->rx.ring) {
			/* Tx only vector */
			if (adapter->tx_itr_setting == 1)
				q_vector->itr = IXGBE_10K_ITR;
			else
				q_vector->itr = adapter->tx_itr_setting;
		} else {
			/* Rx or Rx/Tx vector */
			if (adapter->rx_itr_setting == 1)
				q_vector->itr = IXGBE_20K_ITR;
			else
				q_vector->itr = adapter->rx_itr_setting;
		}

		/* add q_vector eims value to global eims_enable_mask */
		adapter->eims_enable_mask |= 1 << v_idx;

		ixgbevf_write_eitr(q_vector);
	}

	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
	/* setup eims_other and add value to global eims_enable_mask */
	adapter->eims_other = 1 << v_idx;
	adapter->eims_enable_mask |= adapter->eims_other;
}
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
			       struct ixgbevf_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttle rate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = q_vector->itr >> 2;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}
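
/* Unit check for the math above: q_vector->itr is kept in 0.25 usec EITR
 * units, which is why "itr >> 2" converts the current interval to
 * microseconds.  At 20K ints/s (a 50 usec timeslice) a ring must therefore
 * move more than 20 bytes/usec, i.e. roughly 1000 bytes per interrupt,
 * before it is promoted to bulk_latency.
 */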
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbevf_update_itr(q_vector, &q_vector->tx);
	ixgbevf_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
	default:
		new_itr = IXGBE_8K_ITR;
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbevf_write_eitr(q_vector);
	}
}
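
/* Illustrative worked example of the smoothing above (taking IXGBE_20K_ITR
 * as 200 and IXGBE_8K_ITR as 500 in 0.25 usec units): stepping from a 20K
 * ints/s setting toward an 8K ints/s target yields
 * (10 * 500 * 200) / ((9 * 500) + 200) = 1000000 / 4700 ~= 212, so the
 * interval drifts toward the target over several updates rather than
 * jumping, which damps oscillation between latency classes.
 */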
static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
	struct ixgbevf_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;

	hw->mac.get_link_status = 1;

	ixgbevf_service_event_schedule(adapter);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

	return IRQ_HANDLED;
}
/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */
	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->rx_ring[r_idx]->next = q_vector->rx.ring;
	q_vector->rx.ring = a->rx_ring[r_idx];
	q_vector->rx.count++;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->tx_ring[t_idx]->next = q_vector->tx.ring;
	q_vector->tx.ring = a->tx_ring[t_idx];
	q_vector->tx.count++;
}
/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/* If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}
/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	int vector, err;
	int ri = 0, ti = 0;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "TxRx", ri++);
			ti++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "rx", ri++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "tx", ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt Error: %d\n",
			       err);
			goto free_queue_irqs;
		}
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_other, 0, netdev->name, adapter);
	if (err) {
		hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n",
		       err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		free_irq(adapter->msix_entries[vector].vector,
			 adapter->q_vector[vector]);
	}
	/* This failure is non-recoverable - it indicates the system is
	 * out of MSIX vector resources and the VF driver cannot run
	 * without them.  Set the number of msix vectors to zero
	 * indicating that not enough can be allocated.  The error
	 * will be returned to the user indicating device open failed.
	 * Any further attempts to force the driver to open will also
	 * fail.  The only way to recover is to unload the driver and
	 * reload it again.  If the system has recovered some MSIX
	 * vectors then it may succeed.
	 */
	adapter->num_msix_vectors = 0;
	return err;
}
static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];

		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;
		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
	}
}
/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = 0;

	err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err);

	return err;
}
static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors;

	q_vectors = adapter->num_msix_vectors;
	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, adapter);
	i--;

	for (; i >= 0; i--) {
		/* free only the irqs that were actually requested */
		if (!adapter->q_vector[i]->rx.ring &&
		    !adapter->q_vector[i]->tx.ring)
			continue;

		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}

	ixgbevf_reset_q_vectors(adapter);
}
/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);

	IXGBE_WRITE_FLUSH(hw);

	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}
/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}
/**
 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
				      struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 tdba = ring->dma;
	int wait_loop = 10;
	u32 txdctl = IXGBE_TXDCTL_ENABLE;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_tx_desc));

	/* disable head writeback */
	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);

	/* enable relaxed ordering */
	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
			(IXGBE_DCA_TXCTRL_DESC_RRO_EN |
			 IXGBE_DCA_TXCTRL_DATA_RRO_EN));

	/* reset head and tail pointers */
	IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
	ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	/* In order to avoid issues WTHRESH + PTHRESH should always be equal
	 * to or less than the number of on chip descriptors, which is
	 * currently 40.
	 */
	txdctl |= (8 << 16);	/* WTHRESH = 8 */

	/* Setting PTHRESH to 32 improves performance */
	txdctl |= (1 << 8) |	/* HTHRESH = 1 */
		   32;		/* PTHRESH = 32 */

	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);

	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);

	/* poll to verify queue is enabled */
	do {
		usleep_range(1000, 2000);
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
	}  while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
	if (!wait_loop)
		pr_err("Could not enable Tx Queue %d\n", reg_idx);
}
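
/* TXDCTL field packing above (per the 82599 register layout): PTHRESH
 * occupies the low bits, HTHRESH starts at bit 8 and WTHRESH at bit 16,
 * hence the "(1 << 8)" and "(8 << 16)" shifts.  The WTHRESH + PTHRESH
 * constraint noted in the comment bounds how many descriptors the hardware
 * may hold back before writing back completions.
 */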
/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u32 i;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
}
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	srrctl = IXGBE_SRRCTL_DROP_EN;

	srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
	srrctl |= IXGBEVF_RX_BUFSZ >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}
static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* PSRTYPE must be initialized in 82599 */
	u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
		      IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
		      IXGBE_PSRTYPE_L2HDR;

	if (adapter->num_rx_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
}
#define IXGBEVF_MAX_RX_DESC_POLL 10
static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	if (IXGBE_REMOVED(hw->hw_addr))
		return;
	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	rxdctl &= ~IXGBE_RXDCTL_ENABLE;

	/* write value back with RXDCTL.ENABLE bit cleared */
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

	/* the hardware may take up to 100us to really disable the Rx queue */
	do {
		udelay(10);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
		       reg_idx);
}
static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
					 struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	if (IXGBE_REMOVED(hw->hw_addr))
		return;
	do {
		usleep_range(1000, 2000);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
		       reg_idx);
}
static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vfmrqc = 0, vfreta = 0;
	u32 rss_key[10];
	u16 rss_i = adapter->num_rx_queues;
	int i, j;

	/* Fill out hash function seeds */
	netdev_rss_key_fill(rss_key, sizeof(rss_key));
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

	/* Fill out redirection table */
	for (i = 0, j = 0; i < 64; i++, j++) {
		if (j == rss_i)
			j = 0;
		vfreta = (vfreta << 8) | (j * 0x1);
		if ((i & 3) == 3)
			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
	}

	/* Perform hash on these packet types */
	vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 |
		IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP |
		IXGBE_VFMRQC_RSS_FIELD_IPV6 |
		IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP;

	vfmrqc |= IXGBE_VFMRQC_RSSEN;

	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc);
}
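
/* RETA packing above: the 64-entry redirection table is built four 8-bit
 * entries at a time, shifting each queue index into vfreta and flushing one
 * 32-bit VFRETA register every fourth iteration (i >> 2).  The j counter
 * wraps at rss_i, so queues are assigned round-robin; e.g. with two Rx
 * queues the table alternates 0,1,0,1,...
 */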
static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
				      struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	ixgbevf_disable_rx_queue(adapter, ring);

	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_rx_desc));

	/* enable relaxed ordering */
	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
			IXGBE_DCA_RXCTRL_DESC_RRO_EN);

	/* reset head and tail pointers */
	IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
	ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
	ring->next_to_alloc = 0;

	ixgbevf_configure_srrctl(adapter, reg_idx);

	/* allow any size packet since we can handle overflow */
	rxdctl &= ~IXGBE_RXDCTL_RLPML_EN;

	rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

	ixgbevf_rx_desc_queue_enable(adapter, ring);
	ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
}
/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	int i;
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	ixgbevf_setup_psrtype(adapter);
	if (hw->mac.type >= ixgbe_mac_X550_vf)
		ixgbevf_setup_vfmrqc(adapter);

	/* notify the PF of our intent to use this size of frame */
	ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
}
static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
				   __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* add VID to filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, true);

	spin_unlock_bh(&adapter->mbx_lock);

	/* translate error return types so error makes sense */
	if (err == IXGBE_ERR_MBX)
		return -EIO;

	if (err == IXGBE_ERR_INVALID_ARGUMENT)
		return -EACCES;

	set_bit(vid, adapter->active_vlans);

	return err;
}
static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
				    __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err = -EOPNOTSUPP;

	spin_lock_bh(&adapter->mbx_lock);

	/* remove VID from filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, false);

	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(vid, adapter->active_vlans);

	return err;
}
static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbevf_vlan_rx_add_vid(adapter->netdev,
					htons(ETH_P_8021Q), vid);
}
static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int count = 0;

	if ((netdev_uc_count(netdev)) > 10) {
		pr_err("Too many unicast filters - No Space\n");
		return -ENOSPC;
	}

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, netdev) {
			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
			udelay(200);
		}
	} else {
		/* If the list is empty then send message to PF driver to
		 * clear all MAC VLANs on this VF.
		 */
		hw->mac.ops.set_uc_addr(hw, 0, NULL);
	}

	return count;
}
/**
 * ixgbevf_set_rx_mode - Multicast and unicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list, unicast address list or the network interface flags are updated.
 * This routine is responsible for configuring the hardware for proper
 * multicast mode and configuring requested unicast filters.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	spin_lock_bh(&adapter->mbx_lock);

	/* reprogram multicast list */
	hw->mac.ops.update_mc_addr_list(hw, netdev);

	ixgbevf_write_uc_addr_list(netdev);

	spin_unlock_bh(&adapter->mbx_lock);
}
static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
#ifdef CONFIG_NET_RX_BUSY_POLL
		ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
#endif
		napi_enable(&q_vector->napi);
	}
}
static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_disable(&q_vector->napi);
#ifdef CONFIG_NET_RX_BUSY_POLL
		while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
			pr_info("QV %d locked\n", q_idx);
			usleep_range(1000, 20000);
		}
#endif /* CONFIG_NET_RX_BUSY_POLL */
	}
}
static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	unsigned int num_rx_queues = adapter->num_rx_queues;
	unsigned int num_tx_queues = adapter->num_tx_queues;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return err;

	if (num_tcs > 1) {
		/* we need only one Tx queue */
		num_tx_queues = 1;

		/* update default Tx ring register index */
		adapter->tx_ring[0]->reg_idx = def_q;

		/* we need as many queues as traffic classes */
		num_rx_queues = num_tcs;
	}

	/* if we have a bad config abort request queue reset */
	if ((adapter->num_rx_queues != num_rx_queues) ||
	    (adapter->num_tx_queues != num_tx_queues)) {
		/* force mailbox timeout to prevent further messages */
		hw->mbx.timeout = 0;

		/* wait for watchdog to come around and bail us out */
		adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
	}

	return 0;
}
static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	ixgbevf_configure_dcb(adapter);

	ixgbevf_set_rx_mode(adapter->netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
}
static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
	/* Only save pre-reset stats if there are some */
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
			adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
			adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
			adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
			adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
			adapter->stats.base_vfmprc;
	}
}
static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}
static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int api[] = { ixgbe_mbox_api_12,
		      ixgbe_mbox_api_11,
		      ixgbe_mbox_api_10,
		      ixgbe_mbox_api_unknown };
	int err = 0, idx = 0;

	spin_lock_bh(&adapter->mbx_lock);

	while (api[idx] != ixgbe_mbox_api_unknown) {
		err = ixgbevf_negotiate_api_version(hw, api[idx]);
		if (!err)
			break;
		idx++;
	}

	spin_unlock_bh(&adapter->mbx_lock);
}
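
/* The mailbox API is negotiated newest-first: the VF walks the api[] list
 * from ixgbe_mbox_api_12 down and settles on the first version the PF
 * acknowledges, so a newer VF driver keeps working against an older PF at a
 * reduced feature level.
 */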
static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_configure_msix(adapter);

	spin_lock_bh(&adapter->mbx_lock);

	if (is_valid_ether_addr(hw->mac.addr))
		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
	else
		hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);
	ixgbevf_irq_enable(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	ixgbevf_save_reset_stats(adapter);
	ixgbevf_init_last_counter_stats(adapter);

	hw->mac.get_link_status = 1;
	mod_timer(&adapter->service_timer, jiffies);
}
void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
	ixgbevf_configure(adapter);

	ixgbevf_up_complete(adapter);
}
/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	unsigned long size;
	unsigned int i;

	/* Free Rx ring sk_buff */
	if (rx_ring->skb) {
		dev_kfree_skb(rx_ring->skb);
		rx_ring->skb = NULL;
	}

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring pages */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbevf_rx_buffer *rx_buffer;

		rx_buffer = &rx_ring->rx_buffer_info[i];
		if (rx_buffer->dma)
			dma_unmap_page(dev, rx_buffer->dma,
				       PAGE_SIZE, DMA_FROM_DEVICE);
		rx_buffer->dma = 0;
		if (rx_buffer->page)
			__free_page(rx_buffer->page);
		rx_buffer->page = NULL;
	}

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);
}
/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);
}
/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
}
void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	/* signal that we are down to the interrupt handler */
	if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
		return; /* do nothing if already down */

	/* disable all enabled Rx queues */
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);

	usleep_range(10000, 20000);

	netif_tx_stop_all_queues(netdev);

	/* call carrier off first to avoid false dev_watchdog timeouts */
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	ixgbevf_irq_disable(adapter);

	ixgbevf_napi_disable_all(adapter);

	del_timer_sync(&adapter->service_timer);

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		u8 reg_idx = adapter->tx_ring[i]->reg_idx;

		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
				IXGBE_TXDCTL_SWFLSH);
	}

	if (!pci_channel_offline(adapter->pdev))
		ixgbevf_reset(adapter);

	ixgbevf_clean_all_tx_rings(adapter);
	ixgbevf_clean_all_rx_rings(adapter);
}
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
	WARN_ON(in_interrupt());

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		msleep(1);

	ixgbevf_down(adapter);
	ixgbevf_up(adapter);

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}
void ixgbevf_reset(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	if (hw->mac.ops.reset_hw(hw)) {
		hw_dbg(hw, "PF still resetting\n");
	} else {
		hw->mac.ops.init_hw(hw);
		ixgbevf_negotiate_api(adapter);
	}

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}

	adapter->last_reset = jiffies;
}
static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
					int vectors)
{
	int vector_threshold;

	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					vector_threshold, vectors);

	if (vectors < 0) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		return vectors;
	}

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
	 * vectors we were allocated.
	 */
	adapter->num_msix_vectors = vectors;

	return 0;
}
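
/* pci_enable_msix_range() returns the number of vectors actually granted
 * when it can satisfy at least vector_threshold, or a negative errno
 * otherwise.  Illustrative outcome (hypothetical numbers): requesting
 * min 2/max 5 on a host with only 3 free MSI-X vectors yields 3, and
 * num_msix_vectors above records the grant rather than the request.
 */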
/**
 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 **/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	int err;

	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return;

	/* we need as many queues as traffic classes */
	if (num_tcs > 1) {
		adapter->num_rx_queues = num_tcs;
	} else {
		u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES);

		switch (hw->api_version) {
		case ixgbe_mbox_api_11:
		case ixgbe_mbox_api_12:
			adapter->num_rx_queues = rss;
			adapter->num_tx_queues = rss;
		default:
			break;
		}
	}
}
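
/* Worked example (hypothetical box): with 8 online CPUs, no DCB
 * (num_tcs <= 1) and a PF negotiated to mailbox API 1.1 or 1.2:
 *
 *   rss = min_t(u16, 8, IXGBEVF_MAX_RSS_QUEUES)
 *   num_rx_queues = num_tx_queues = rss
 *
 * With DCB (num_tcs > 1) the VF instead gets one Rx queue per traffic
 * class and keeps the single default Tx queue.
 */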
/**
 * ixgbevf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_ring *ring;
	int rx = 0, tx = 0;

	for (; tx < adapter->num_tx_queues; tx++) {
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_allocation;

		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = tx;
		ring->reg_idx = tx;

		adapter->tx_ring[tx] = ring;
	}

	for (; rx < adapter->num_rx_queues; rx++) {
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_allocation;

		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		ring->count = adapter->rx_ring_count;
		ring->queue_index = rx;
		ring->reg_idx = rx;

		adapter->rx_ring[rx] = ring;
	}

	return 0;

err_allocation:
	while (tx) {
		kfree(adapter->tx_ring[--tx]);
		adapter->tx_ring[tx] = NULL;
	}

	while (rx) {
		kfree(adapter->rx_ring[--rx]);
		adapter->rx_ring[rx] = NULL;
	}
	return -ENOMEM;
}
/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0;
	int vector, v_budget;

	/* It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPU's.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
	v_budget = min_t(int, v_budget, num_online_cpus());
	v_budget += NON_Q_VECTORS;

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter.
	 */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
	if (err)
		goto out;

	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
	if (err)
		goto out;

	err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);

out:
	return err;
}
/**
 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	struct ixgbevf_q_vector *q_vector;

	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->v_idx = q_idx;
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       ixgbevf_poll, 64);
#ifdef CONFIG_NET_RX_BUSY_POLL
		napi_hash_add(&q_vector->napi);
#endif
		adapter->q_vector[q_idx] = q_vector;
	}

	return 0;

err_out:
	while (q_idx) {
		q_idx--;
		q_vector = adapter->q_vector[q_idx];
#ifdef CONFIG_NET_RX_BUSY_POLL
		napi_hash_del(&q_vector->napi);
#endif
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
		adapter->q_vector[q_idx] = NULL;
	}
	return -ENOMEM;
}
/**
 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];

		adapter->q_vector[q_idx] = NULL;
#ifdef CONFIG_NET_RX_BUSY_POLL
		napi_hash_del(&q_vector->napi);
#endif
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
}
/**
 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 **/
static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}
/**
 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 **/
static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbevf_set_num_queues(adapter);

	err = ixgbevf_set_interrupt_capability(adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbevf_alloc_q_vectors(adapter);
	if (err) {
		hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	err = ixgbevf_alloc_queues(adapter);
	if (err) {
		pr_err("Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
	       (adapter->num_rx_queues > 1) ? "Enabled" :
	       "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	return 0;
err_alloc_queues:
	ixgbevf_free_q_vectors(adapter);
err_alloc_q_vectors:
	ixgbevf_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}
/**
 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbevf_free_q_vectors(adapter);
	ixgbevf_reset_interrupt_capability(adapter);
}
/**
 * ixgbevf_sw_init - Initialize general software structures
 * @adapter: board private structure to initialize
 *
 * ixgbevf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	int err;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	hw->mbx.ops.init_params(hw);

	/* assume legacy case in which PF would only give VF 2 queues */
	hw->mac.max_tx_queues = 2;
	hw->mac.max_rx_queues = 2;

	/* lock to protect mailbox accesses */
	spin_lock_init(&adapter->mbx_lock);

	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_info(&pdev->dev,
			 "PF still in reset state.  Is the PF interface up?\n");
	} else {
		err = hw->mac.ops.init_hw(hw);
		if (err) {
			pr_err("init_shared_code failed: %d\n", err);
			goto out;
		}
		ixgbevf_negotiate_api(adapter);
		err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
		if (err)
			dev_info(&pdev->dev, "Error reading MAC address\n");
		else if (is_zero_ether_addr(adapter->hw.mac.addr))
			dev_info(&pdev->dev,
				 "MAC address not assigned by administrator.\n");
		memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	}

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_info(&pdev->dev, "Assigning random MAC address\n");
		eth_hw_addr_random(netdev);
		memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
	}

	/* Enable dynamic interrupt throttling rates */
	adapter->rx_itr_setting = 1;
	adapter->tx_itr_setting = 1;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;

	set_bit(__IXGBEVF_DOWN, &adapter->state);
	return 0;

out:
	return err;
}
#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
	{							\
		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
		if (current_counter < last_counter)		\
			counter += 0x100000000LL;		\
		last_counter = current_counter;			\
		counter &= 0xFFFFFFFF00000000LL;		\
		counter |= current_counter;			\
	}

#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{								 \
		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
		u64 current_counter = (current_counter_msb << 32) |	 \
			current_counter_lsb;				 \
		if (current_counter < last_counter)			 \
			counter += 0x1000000000LL;			 \
		last_counter = current_counter;				 \
		counter &= 0xFFFFFFF000000000LL;			 \
		counter |= current_counter;				 \
	}
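
/* Worked example of the 36-bit rollover handling above (hypothetical
 * register values): if last_counter was 0xFFFFFFF00 and the hardware now
 * reads 0x000000100, current_counter < last_counter, so the software
 * accumulator is bumped by 0x1000000000 (2^36) before its low 36 bits are
 * replaced, preserving the running total across the hardware wrap.
 */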
/**
 * ixgbevf_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
				adapter->stats.vfgprc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
				adapter->stats.vfgptc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
				adapter->stats.last_vfgorc,
				adapter->stats.vfgorc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
				adapter->stats.last_vfgotc,
				adapter->stats.vfgotc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
				adapter->stats.vfmprc);

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->hw_csum_rx_error +=
			adapter->rx_ring[i]->hw_csum_rx_error;
		adapter->rx_ring[i]->hw_csum_rx_error = 0;
	}
}
/**
 * ixgbevf_service_timer - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbevf_service_timer(unsigned long data)
{
	struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;

	/* Reset the timer */
	mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);

	ixgbevf_service_event_schedule(adapter);
}
static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
{
	if (!(adapter->flags & IXGBEVF_FLAG_RESET_REQUESTED))
		return;

	adapter->flags &= ~IXGBEVF_FLAG_RESET_REQUESTED;

	/* If we're already down or resetting, just bail */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	adapter->tx_timeout_count++;

	ixgbevf_reinit_locked(adapter);
}
/**
 * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
 * @adapter: pointer to the device adapter structure
 *
 * This function serves two purposes.  First it strobes the interrupt lines
 * in order to make certain interrupts are occurring.  Secondly it sets the
 * bits needed to check for TX hangs.  As a result we should immediately
 * determine if a hang has occurred.
 **/
static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eics = 0;
	int i;

	/* If we're down or resetting, just bail */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	/* Force detection of hung controller */
	if (netif_carrier_ok(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			set_check_for_tx_hang(adapter->tx_ring[i]);
	}

	/* get one bit for every active Tx/Rx interrupt vector */
	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
		struct ixgbevf_q_vector *qv = adapter->q_vector[i];

		if (qv->rx.ring || qv->tx.ring)
			eics |= 1 << i;
	}

	/* Cause software interrupt to ensure rings are cleaned */
	IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
}
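
/* eics accumulates one bit per q_vector that owns at least one ring;
 * writing it to VTEICS asks the hardware to raise those MSI-X vectors as
 * software interrupts.  E.g. with two active queue vectors, eics == 0x3
 * and both handlers run even when no traffic is pending, which is what
 * lets the Tx-hang check observe forward progress.
 */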
/**
 * ixgbevf_watchdog_update_link - update the link status
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;
	s32 err;

	spin_lock_bh(&adapter->mbx_lock);

	err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);

	spin_unlock_bh(&adapter->mbx_lock);

	/* if check for link returns error we will need to reset */
	if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
		adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
		link_up = false;
	}

	adapter->link_up = link_up;
	adapter->link_speed = link_speed;
}
/**
 * ixgbevf_watchdog_link_is_up - update netif_carrier status and
 *				 print link up message
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* only continue if link was previously down */
	if (netif_carrier_ok(netdev))
		return;

	dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n",
		 (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
		 "10 Gbps" :
		 (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ?
		 "1 Gbps" :
		 (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ?
		 "100 Mbps" :
		 "unknown speed");

	netif_carrier_on(netdev);
}
/**
 * ixgbevf_watchdog_link_is_down - update netif_carrier status and
 *				   print link down message
 * @adapter: pointer to the adapter structure
 **/
static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	adapter->link_speed = 0;

	/* only continue if link was up previously */
	if (!netif_carrier_ok(netdev))
		return;

	dev_info(&adapter->pdev->dev, "NIC Link is Down\n");

	netif_carrier_off(netdev);
}
/**
 * ixgbevf_watchdog_subtask - worker thread to bring link up
 * @adapter: board private structure
 **/
static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
{
	/* if interface is down do nothing */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	ixgbevf_watchdog_update_link(adapter);

	if (adapter->link_up)
		ixgbevf_watchdog_link_is_up(adapter);
	else
		ixgbevf_watchdog_link_is_down(adapter);

	ixgbevf_update_stats(adapter);
}
/**
 * ixgbevf_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/
static void ixgbevf_service_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter = container_of(work,
						       struct ixgbevf_adapter,
						       service_task);
	struct ixgbe_hw *hw = &adapter->hw;

	if (IXGBE_REMOVED(hw->hw_addr)) {
		if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			rtnl_lock();
			ixgbevf_down(adapter);
			rtnl_unlock();
		}
		return;
	}

	ixgbevf_queue_reset_subtask(adapter);
	ixgbevf_reset_subtask(adapter);
	ixgbevf_watchdog_subtask(adapter);
	ixgbevf_check_hang_subtask(adapter);

	ixgbevf_service_event_complete(adapter);
}
/**
 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
{
	ixgbevf_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}
/**
 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i]->desc)
			ixgbevf_free_tx_resources(adapter->tx_ring[i]);
}
/**
 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
{
	int size;

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}
/**
 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}
/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
{
	int size;

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc)
		goto err;

	return 0;
err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
	return -ENOMEM;
}
/**
 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i);
		break;
	}

	return err;
}
/**
 * ixgbevf_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
{
	ixgbevf_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}
/**
 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i]->desc)
			ixgbevf_free_rx_resources(adapter->rx_ring[i]);
}
/**
 * ixgbevf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbevf_open(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	/* A previous failure to open the device because of a lack of
	 * available MSIX vector resources may have reset the number
	 * of msix vectors variable to zero.  The only way to recover
	 * is to unload/reload the driver and hope that the system has
	 * been able to recover some MSIX vector resources.
	 */
	if (!adapter->num_msix_vectors)
		return -ENOMEM;

	if (hw->adapter_stopped) {
		ixgbevf_reset(adapter);
		/* if adapter is still stopped then PF isn't up and
		 * the VF can't start.
		 */
		if (hw->adapter_stopped) {
			err = IXGBE_ERR_MBX;
			pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
			goto err_setup_reset;
		}
	}

	/* disallow open during test */
	if (test_bit(__IXGBEVF_TESTING, &adapter->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = ixgbevf_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbevf_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbevf_configure(adapter);

	/* Map the Tx/Rx rings to the vectors we were allotted.
	 * if request_irq will be called in this function map_rings
	 * must be called *before* up_complete
	 */
	ixgbevf_map_rings_to_vectors(adapter);

	err = ixgbevf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	ixgbevf_up_complete(adapter);

	return 0;

err_req_irq:
	ixgbevf_down(adapter);
err_setup_rx:
	ixgbevf_free_all_rx_resources(adapter);
err_setup_tx:
	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_reset(adapter);

err_setup_reset:

	return err;
}
/**
 * ixgbevf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbevf_close(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ixgbevf_down(adapter);
	ixgbevf_free_irq(adapter);

	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_free_all_rx_resources(adapter);

	return 0;
}
static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;

	if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
		return;

	adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;

	/* if interface is down do nothing */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	/* Hardware has to reinitialize queues and interrupts to
	 * match packet buffer alignment. Unfortunately, the
	 * hardware is not flexible enough to do this dynamically.
	 */
	if (netif_running(dev))
		ixgbevf_close(dev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_init_interrupt_scheme(adapter);

	if (netif_running(dev))
		ixgbevf_open(dev);
}
static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
				u32 vlan_macip_lens, u32 type_tucmd,
				u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed	= 0;
	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
}
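
/* A context descriptor occupies a slot in the same ring as data
 * descriptors, which is why next_to_use wraps modulo tx_ring->count
 * above.  E.g. with count = 512 and next_to_use = 511, the context
 * descriptor lands in slot 511 and next_to_use becomes 0.
 */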
static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
		       struct ixgbevf_tx_buffer *first,
		       u8 *hdr_len)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens, type_tucmd;
	u32 mss_l4len_idx, l4len;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;

	if (first->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP, 0);
		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM |
				   IXGBE_TX_FLAGS_IPV4;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr,
				     0, IPPROTO_TCP, 0);
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM;
	}

	/* compute header lengths */
	l4len = tcp_hdrlen(skb);

	*hdr_len = skb_transport_offset(skb) + l4len;

	/* update GSO size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* mss_l4len_id: use 1 as index for TSO */
	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = skb_network_header_len(skb);
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    type_tucmd, mss_l4len_idx);

	return 1;
}
static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
			    struct ixgbevf_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 mss_l4len_idx = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 l4_hdr = 0;

		switch (first->protocol) {
		case htons(ETH_P_IP):
			vlan_macip_lens |= skb_network_header_len(skb);
			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
			l4_hdr = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			vlan_macip_lens |= skb_network_header_len(skb);
			l4_hdr = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but proto=%x!\n",
					 first->protocol);
			}
			break;
		}

		switch (l4_hdr) {
		case IPPROTO_TCP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
			mss_l4len_idx = tcp_hdrlen(skb) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_SCTP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			mss_l4len_idx = sizeof(struct sctphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_UDP:
			mss_l4len_idx = sizeof(struct udphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but l4 proto=%x!\n",
					 l4_hdr);
			}
			break;
		}

		/* update TX checksum flag */
		first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
	}

	/* vlan_macip_lens: MACLEN, VLAN tag */
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    type_tucmd, mss_l4len_idx);
}
static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	__le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
				      IXGBE_ADVTXD_DCMD_IFCS |
				      IXGBE_ADVTXD_DCMD_DEXT);

	/* set HW VLAN bit if VLAN is present */
	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);

	/* set segmentation enable bits for TSO/FSO */
	if (tx_flags & IXGBE_TX_FLAGS_TSO)
		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);

	return cmd_type;
}
static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
				     u32 tx_flags, unsigned int paylen)
{
	__le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);

	/* enable L4 checksum for TSO and TX checksum offload */
	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);

	/* enable IPv4 checksum for TSO */
	if (tx_flags & IXGBE_TX_FLAGS_IPV4)
		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);

	/* use index 1 context for TSO/FSO/FCOE */
	if (tx_flags & IXGBE_TX_FLAGS_TSO)
		olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);

	/* Check Context must be set if Tx switch is enabled, which it
	 * always is for case where virtual functions are running
	 */
	olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);

	tx_desc->read.olinfo_status = olinfo_status;
}
static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
			   struct ixgbevf_tx_buffer *first,
			   const u8 hdr_len)
{
	dma_addr_t dma;
	struct sk_buff *skb = first->skb;
	struct ixgbevf_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	unsigned int paylen = skb->len - hdr_len;
	u32 tx_flags = first->tx_flags;
	__le32 cmd_type;
	u16 i = tx_ring->next_to_use;

	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);

	ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
	cmd_type = ixgbevf_tx_cmd_type(tx_flags);

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_ring->dev, dma))
		goto dma_error;

	/* record length, and DMA address */
	dma_unmap_len_set(first, len, size);
	dma_unmap_addr_set(first, dma, dma);

	tx_desc->read.buffer_addr = cpu_to_le64(dma);

	for (;;) {
		while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += IXGBE_MAX_DATA_PER_TXD;
			size -= IXGBE_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
			tx_desc->read.olinfo_status = 0;
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		tx_buffer = &tx_ring->tx_buffer_info[i];
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);
		tx_desc->read.olinfo_status = 0;

		frag++;
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
	tx_desc->read.cmd_type_len = cmd_type;

	/* set the timestamp */
	first->time_stamp = jiffies;

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier (wmb) to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* notify HW of packet */
	ixgbevf_write_tail(tx_ring, i);

	return;
dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer_info map */
	for (;;) {
		tx_buffer = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
		if (tx_buffer == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}
static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available.
	 */
	if (likely(ixgbevf_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;

	return 0;
}
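
/* The smp_mb() above pairs with the ring-cleanup path: the queue is
 * stopped first, the barrier publishes the stopped state, and only then
 * is the free-descriptor count re-read.  Without it, cleanup running on
 * another CPU could free descriptors, test a not-yet-visible "stopped"
 * flag, skip the wake-up, and leave the queue stopped forever.
 */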
static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	if (likely(ixgbevf_desc_unused(tx_ring) >= size))
		return 0;
	return __ixgbevf_maybe_stop_tx(tx_ring, size);
}
static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_tx_buffer *first;
	struct ixgbevf_ring *tx_ring;
	int tso;
	u32 tx_flags = 0;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	unsigned short f;
#endif
	u8 hdr_len = 0;
	u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);

	if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	tx_ring = adapter->tx_ring[skb->queue_mapping];

	/* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
#else
	count += skb_shinfo(skb)->nr_frags;
#endif
	if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= skb_vlan_tag_get(skb);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = vlan_get_protocol(skb);

	tso = ixgbevf_tso(tx_ring, first, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		ixgbevf_tx_csum(tx_ring, first);

	ixgbevf_tx_map(tx_ring, first, hdr_len);

	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}
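
/* Descriptor budgeting sketch for the count + 3 check above (hypothetical
 * skb): a frame with a single-chunk head and 3 page frags needs roughly
 * count = 4 data descriptors; the +3 then covers 1 context descriptor
 * plus the 2-descriptor gap that keeps tail from touching head, exactly
 * as enumerated in the comment inside ixgbevf_xmit_frame().
 */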
/**
 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	spin_lock_bh(&adapter->mbx_lock);

	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	return 0;
}
/**
 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;

	switch (adapter->hw.api_version) {
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
		max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
		break;
	default:
		if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
			max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
		break;
	}

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > max_possible_frame))
		return -EINVAL;

	hw_dbg(hw, "changing MTU from %d to %d\n",
	       netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	/* notify the PF of our intent to use this size of frame */
	ixgbevf_rlpml_set_vf(hw, max_frame);

	return 0;
}
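
/* max_frame arithmetic, e.g. for new_mtu = 1500:
 *   max_frame = 1500 + ETH_HLEN(14) + ETH_FCS_LEN(4) = 1518
 * The PF is told this limit via ixgbevf_rlpml_set_vf() so it can program
 * the receive packet length check; jumbo MTUs pass the same comparison
 * against IXGBE_MAX_JUMBO_FRAME_SIZE on non-82599 (or API 1.1+) VFs.
 */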
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void ixgbevf_netpoll(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	int i;

	/* if interface is down do nothing */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return;
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_msix_clean_rings(0, adapter->q_vector[i]);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		ixgbevf_down(adapter);
		ixgbevf_free_irq(adapter);
		ixgbevf_free_all_tx_resources(adapter);
		ixgbevf_free_all_rx_resources(adapter);
		rtnl_unlock();
	}

	ixgbevf_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;

#endif
	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
		pci_disable_device(pdev);

	return 0;
}
#ifdef CONFIG_PM
static int ixgbevf_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_restore_state(pdev);
	/* pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DISABLED, &adapter->state);
	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	rtnl_lock();
	err = ixgbevf_init_interrupt_scheme(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&pdev->dev, "Cannot initialize interrupts\n");
		return err;
	}

	if (netif_running(netdev)) {
		err = ixgbevf_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return err;
}

#endif /* CONFIG_PM */
static void ixgbevf_shutdown(struct pci_dev *pdev)
{
	ixgbevf_suspend(pdev, PMSG_SUSPEND);
}
static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	unsigned int start;
	u64 bytes, packets;
	const struct ixgbevf_ring *ring;
	int i;

	ixgbevf_update_stats(adapter);

	stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = adapter->rx_ring[i];
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			bytes = ring->stats.bytes;
			packets = ring->stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		stats->rx_bytes += bytes;
		stats->rx_packets += packets;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			bytes = ring->stats.bytes;
			packets = ring->stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		stats->tx_bytes += bytes;
		stats->tx_packets += packets;
	}

	return stats;
}
static const struct net_device_ops ixgbevf_netdev_ops = {
	.ndo_open		= ixgbevf_open,
	.ndo_stop		= ixgbevf_close,
	.ndo_start_xmit		= ixgbevf_xmit_frame,
	.ndo_set_rx_mode	= ixgbevf_set_rx_mode,
	.ndo_get_stats64	= ixgbevf_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbevf_set_mac,
	.ndo_change_mtu		= ixgbevf_change_mtu,
	.ndo_tx_timeout		= ixgbevf_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgbevf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbevf_vlan_rx_kill_vid,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= ixgbevf_busy_poll_recv,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ixgbevf_netpoll,
#endif
};
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
{
	dev->netdev_ops = &ixgbevf_netdev_ops;
	ixgbevf_set_ethtool_ops(dev);
	dev->watchdog_timeo = 5 * HZ;
}
/**
 * ixgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbevf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbevf_adapter *adapter = NULL;
	struct ixgbe_hw *hw = NULL;
	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
	int err, pci_using_dac;
	bool disable_dev = false;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
			goto err_dma;
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, ixgbevf_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
				   MAX_TX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/* call save state here in standalone driver because it relies on
	 * adapter struct to exist, and needs to call netdev_priv
	 */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	adapter->io_addr = hw->hw_addr;
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	ixgbevf_assign_netdev_ops(netdev);

	/* Setup HW API */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
	       sizeof(struct ixgbe_mbx_operations));

	/* setup the private structure */
	err = ixgbevf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* The HW MAC address was set and/or determined in sw_init */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		pr_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM;

	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_CTAG_RX |
			   NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_TSO |
				 NETIF_F_TSO6 |
				 NETIF_F_IP_CSUM |
				 NETIF_F_IPV6_CSUM |
				 NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	if (IXGBE_REMOVED(hw->hw_addr)) {
		err = -EIO;
		goto err_sw_init;
	}

	setup_timer(&adapter->service_timer, &ixgbevf_service_timer,
		    (unsigned long)adapter);

	INIT_WORK(&adapter->service_task, ixgbevf_service_task);
	set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state);
	clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);

	err = ixgbevf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	strcpy(netdev->name, "eth%d");

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	pci_set_drvdata(pdev, netdev);
	netif_carrier_off(netdev);

	ixgbevf_init_last_counter_stats(adapter);

	/* print the VF info */
	dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
	dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);

	switch (hw->mac.type) {
	case ixgbe_mac_X550_vf:
		dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n");
		break;
	case ixgbe_mac_X540_vf:
		dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
		break;
	case ixgbe_mac_82599_vf:
	default:
		dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
		break;
	}

	return 0;

err_register:
	ixgbevf_clear_interrupt_scheme(adapter);
err_sw_init:
	ixgbevf_reset_interrupt_capability(adapter);
	iounmap(adapter->io_addr);
err_ioremap:
	disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	if (!adapter || disable_dev)
		pci_disable_device(pdev);
	return err;
}
/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter;
	bool disable_dev;

	if (!netdev)
		return;

	adapter = netdev_priv(netdev);

	set_bit(__IXGBEVF_REMOVING, &adapter->state);
	cancel_work_sync(&adapter->service_task);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_reset_interrupt_capability(adapter);

	iounmap(adapter->io_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");

	disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
	free_netdev(netdev);

	if (disable_dev)
		pci_disable_device(pdev);
}
/**
 * ixgbevf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
		return PCI_ERS_RESULT_DISCONNECT;

	rtnl_lock();
	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		ixgbevf_down(adapter);

	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
		pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.  Implementation
 * resembles the first-half of the ixgbevf_resume routine.
 **/
static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DISABLED, &adapter->state);
	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * ixgbevf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.  Implementation resembles the
 * second-half of the ixgbevf_resume routine.
 **/
static void ixgbevf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbevf_up(adapter);

	netif_device_attach(netdev);
}
/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers ixgbevf_err_handler = {
	.error_detected = ixgbevf_io_error_detected,
	.slot_reset = ixgbevf_io_slot_reset,
	.resume = ixgbevf_io_resume,
};

static struct pci_driver ixgbevf_driver = {
	.name = ixgbevf_driver_name,
	.id_table = ixgbevf_pci_tbl,
	.probe = ixgbevf_probe,
	.remove = ixgbevf_remove,
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend = ixgbevf_suspend,
	.resume = ixgbevf_resume,
#endif
	.shutdown = ixgbevf_shutdown,
	.err_handler = &ixgbevf_err_handler
};
/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init ixgbevf_init_module(void)
{
	int ret;

	pr_info("%s - version %s\n", ixgbevf_driver_string,
		ixgbevf_driver_version);

	pr_info("%s\n", ixgbevf_copyright);

	ret = pci_register_driver(&ixgbevf_driver);
	return ret;
}

module_init(ixgbevf_init_module);

/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbevf_exit_module(void)
{
	pci_unregister_driver(&ixgbevf_driver);
}

#ifdef DEBUG
/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;

	return adapter->netdev->name;
}

#endif
module_exit(ixgbevf_exit_module);

/* ixgbevf_main.c */