/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2014 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <net/busy_poll.h>

#include "ixgbevf.h"
const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
        "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.12.1-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
        "Copyright (c) 2009 - 2012 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
        [board_82599_vf]    = &ixgbevf_82599_vf_info,
        [board_X540_vf]     = &ixgbevf_X540_vf_info,
        [board_X550_vf]     = &ixgbevf_X550_vf_info,
        [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
};
/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgbevf_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf},
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf},
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf},
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf},
        /* required last entry */
        {0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/* forward decls */
static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);

static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
{
        struct ixgbevf_adapter *adapter = hw->back;

        if (!hw->hw_addr)
                return;
        hw->hw_addr = NULL;
        dev_err(&adapter->pdev->dev, "Adapter removed\n");
        if (test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
                schedule_work(&adapter->watchdog_task);
}

static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
{
        u32 value;

        /* The following check not only optimizes a bit by not
         * performing a read on the status register when the
         * register just read was a status register read that
         * returned IXGBE_FAILED_READ_REG. It also blocks any
         * potential recursion.
         */
        if (reg == IXGBE_VFSTATUS) {
                ixgbevf_remove_adapter(hw);
                return;
        }
        value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
        if (value == IXGBE_FAILED_READ_REG)
                ixgbevf_remove_adapter(hw);
}

u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
{
        u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
        u32 value;

        if (IXGBE_REMOVED(reg_addr))
                return IXGBE_FAILED_READ_REG;
        value = readl(reg_addr + reg);
        if (unlikely(value == IXGBE_FAILED_READ_REG))
                ixgbevf_check_remove(hw, reg);
        return value;
}

/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 **/
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
                             u8 queue, u8 msix_vector)
{
        u32 ivar, index;
        struct ixgbe_hw *hw = &adapter->hw;

        if (direction == -1) {
                /* other causes */
                msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
                ivar &= ~0xFF;
                ivar |= msix_vector;
                IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
        } else {
                /* tx or rx causes */
                msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                index = ((16 * (queue & 1)) + (8 * direction));
                ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
                ivar &= ~(0xFF << index);
                ivar |= (msix_vector << index);
                IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
        }
}
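
/* Worked example (illustrative; not part of the original driver
 * source): a call with direction = 1 (Tx), queue = 5 and
 * msix_vector = 2 computes index = (16 * (5 & 1)) + (8 * 1) = 24 and
 * targets VTIVAR(5 >> 1) = VTIVAR(2), so byte lane 31:24 of that
 * register is programmed with vector 2 OR'd with IXGBE_IVAR_ALLOC_VAL.
 */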

static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
                                               struct ixgbevf_tx_buffer *tx_buffer)
{
        if (tx_buffer->skb) {
                dev_kfree_skb_any(tx_buffer->skb);
                if (dma_unmap_len(tx_buffer, len))
                        dma_unmap_single(tx_ring->dev,
                                         dma_unmap_addr(tx_buffer, dma),
                                         dma_unmap_len(tx_buffer, len),
                                         DMA_TO_DEVICE);
        } else if (dma_unmap_len(tx_buffer, len)) {
                dma_unmap_page(tx_ring->dev,
                               dma_unmap_addr(tx_buffer, dma),
                               dma_unmap_len(tx_buffer, len),
                               DMA_TO_DEVICE);
        }
        tx_buffer->next_to_watch = NULL;
        tx_buffer->skb = NULL;
        dma_unmap_len_set(tx_buffer, len, 0);
        /* tx_buffer must be completely set up in the transmit path */
}

#define IXGBE_MAX_TXD_PWR       14
#define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
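
/* Worked example (illustrative; not part of the original driver
 * source): IXGBE_MAX_DATA_PER_TXD is 1 << 14 = 16384 bytes, so a
 * maximally sized 65536-byte fragment costs TXD_USE_COUNT(65536) =
 * DIV_ROUND_UP(65536, 16384) = 4 descriptors; with the common
 * MAX_SKB_FRAGS of 17, DESC_NEEDED reserves 21 descriptors for a
 * worst-case packet.
 */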

static void ixgbevf_tx_timeout(struct net_device *netdev);

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
                                 struct ixgbevf_ring *tx_ring)
{
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct ixgbevf_tx_buffer *tx_buffer;
        union ixgbe_adv_tx_desc *tx_desc;
        unsigned int total_bytes = 0, total_packets = 0;
        unsigned int budget = tx_ring->count / 2;
        unsigned int i = tx_ring->next_to_clean;

        if (test_bit(__IXGBEVF_DOWN, &adapter->state))
                return true;

        tx_buffer = &tx_ring->tx_buffer_info[i];
        tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
        i -= tx_ring->count;

        do {
                union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

                /* if next_to_watch is not set then there is no work pending */
                if (!eop_desc)
                        break;

                /* prevent any other reads prior to eop_desc */
                read_barrier_depends();

                /* if DD is not set pending work has not been completed */
                if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
                        break;

                /* clear next_to_watch to prevent false hangs */
                tx_buffer->next_to_watch = NULL;

                /* update the statistics for this packet */
                total_bytes += tx_buffer->bytecount;
                total_packets += tx_buffer->gso_segs;

                /* free the skb */
                dev_kfree_skb_any(tx_buffer->skb);

                /* unmap skb header data */
                dma_unmap_single(tx_ring->dev,
                                 dma_unmap_addr(tx_buffer, dma),
                                 dma_unmap_len(tx_buffer, len),
                                 DMA_TO_DEVICE);

                /* clear tx_buffer data */
                tx_buffer->skb = NULL;
                dma_unmap_len_set(tx_buffer, len, 0);

                /* unmap remaining buffers */
                while (tx_desc != eop_desc) {
                        tx_buffer++;
                        tx_desc++;
                        i++;
                        if (unlikely(!i)) {
                                i -= tx_ring->count;
                                tx_buffer = tx_ring->tx_buffer_info;
                                tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
                        }

                        /* unmap any remaining paged data */
                        if (dma_unmap_len(tx_buffer, len)) {
                                dma_unmap_page(tx_ring->dev,
                                               dma_unmap_addr(tx_buffer, dma),
                                               dma_unmap_len(tx_buffer, len),
                                               DMA_TO_DEVICE);
                                dma_unmap_len_set(tx_buffer, len, 0);
                        }
                }

                /* move us one more past the eop_desc for start of next pkt */
                tx_buffer++;
                tx_desc++;
                i++;
                if (unlikely(!i)) {
                        i -= tx_ring->count;
                        tx_buffer = tx_ring->tx_buffer_info;
                        tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
                }

                /* issue prefetch for next Tx descriptor */
                prefetch(tx_desc);

                /* update budget accounting */
                budget--;
        } while (likely(budget));

        i += tx_ring->count;
        tx_ring->next_to_clean = i;
        u64_stats_update_begin(&tx_ring->syncp);
        tx_ring->stats.bytes += total_bytes;
        tx_ring->stats.packets += total_packets;
        u64_stats_update_end(&tx_ring->syncp);
        q_vector->tx.total_bytes += total_bytes;
        q_vector->tx.total_packets += total_packets;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
        if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
                     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();

                if (__netif_subqueue_stopped(tx_ring->netdev,
                                             tx_ring->queue_index) &&
                    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
                        netif_wake_subqueue(tx_ring->netdev,
                                            tx_ring->queue_index);
                        ++tx_ring->tx_stats.restart_queue;
                }
        }

        return !!budget;
}
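
/* Worked example (illustrative; not part of the original driver
 * source): with MAX_SKB_FRAGS = 17, DESC_NEEDED is 21 and
 * TX_WAKE_THRESHOLD is 42, so the queue above is only re-woken once at
 * least 42 descriptors are free, preventing a rapid stop/wake
 * ping-pong near a full ring.
 */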

/**
 * ixgbevf_rx_skb - Helper function to determine proper Rx method
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/
static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
                           struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
        skb_mark_napi_id(skb, &q_vector->napi);

        if (ixgbevf_qv_busy_polling(q_vector)) {
                netif_receive_skb(skb);
                /* exit early if we busy polled */
                return;
        }
#endif /* CONFIG_NET_RX_BUSY_POLL */

        napi_gro_receive(&q_vector->napi, skb);
}

/* ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 */
static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
                                       union ixgbe_adv_rx_desc *rx_desc,
                                       struct sk_buff *skb)
{
        skb_checksum_none_assert(skb);

        /* Rx csum disabled */
        if (!(ring->netdev->features & NETIF_F_RXCSUM))
                return;

        /* if IP and error */
        if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
            ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
                ring->rx_stats.csum_err++;
                return;
        }

        if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
                return;

        if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
                ring->rx_stats.csum_err++;
                return;
        }

        /* It must be a TCP or UDP packet with a valid checksum */
        skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/* ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the checksum, VLAN, protocol, and other fields within
 * the skb.
 */
static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
                                       union ixgbe_adv_rx_desc *rx_desc,
                                       struct sk_buff *skb)
{
        ixgbevf_rx_checksum(rx_ring, rx_desc, skb);

        if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
                u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
                unsigned long *active_vlans = netdev_priv(rx_ring->netdev);

                if (test_bit(vid & VLAN_VID_MASK, active_vlans))
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
        }

        skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

/**
 * ixgbevf_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: current socket buffer containing buffer in progress
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
                               union ixgbe_adv_rx_desc *rx_desc)
{
        u32 ntc = rx_ring->next_to_clean + 1;

        /* fetch, update, and store next to clean */
        ntc = (ntc < rx_ring->count) ? ntc : 0;
        rx_ring->next_to_clean = ntc;

        prefetch(IXGBEVF_RX_DESC(rx_ring, ntc));

        if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
                return false;

        return true;
}

static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
                                      struct ixgbevf_rx_buffer *bi)
{
        struct page *page = bi->page;
        dma_addr_t dma = bi->dma;

        /* since we are recycling buffers we should seldom need to alloc */
        if (likely(page))
                return true;

        /* alloc new page for storage */
        page = dev_alloc_page();
        if (unlikely(!page)) {
                rx_ring->rx_stats.alloc_rx_page_failed++;
                return false;
        }

        /* map page for use */
        dma = dma_map_page(rx_ring->dev, page, 0,
                           PAGE_SIZE, DMA_FROM_DEVICE);

        /* if mapping failed free memory back to system since
         * there isn't much point in holding memory we can't use
         */
        if (dma_mapping_error(rx_ring->dev, dma)) {
                __free_page(page);

                rx_ring->rx_stats.alloc_rx_buff_failed++;
                return false;
        }

        bi->dma = dma;
        bi->page = page;
        bi->page_offset = 0;

        return true;
}

/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
 * @cleaned_count: number of buffers to replace
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
                                     u16 cleaned_count)
{
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbevf_rx_buffer *bi;
        unsigned int i = rx_ring->next_to_use;

        /* nothing to do or no valid netdev defined */
        if (!cleaned_count || !rx_ring->netdev)
                return;

        rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
        bi = &rx_ring->rx_buffer_info[i];
        i -= rx_ring->count;

        do {
                if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
                        break;

                /* Refresh the desc even if pkt_addr didn't change
                 * because each write-back erases this info.
                 */
                rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

                rx_desc++;
                bi++;
                i++;
                if (unlikely(!i)) {
                        rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
                        bi = rx_ring->rx_buffer_info;
                        i -= rx_ring->count;
                }

                /* clear the hdr_addr for the next_to_use descriptor */
                rx_desc->read.hdr_addr = 0;

                cleaned_count--;
        } while (cleaned_count);

        i += rx_ring->count;

        if (rx_ring->next_to_use != i) {
                /* record the next descriptor to use */
                rx_ring->next_to_use = i;

                /* update next to alloc since we have filled the ring */
                rx_ring->next_to_alloc = i;

                /* Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch.  (Only
                 * applicable for weak-ordered memory model archs,
                 * such as IA-64).
                 */
                wmb();
                ixgbevf_write_tail(rx_ring, i);
        }
}

/* ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being adjusted
 *
 * This function is an ixgbevf specific version of __pskb_pull_tail.  The
 * main difference between this version and the original function is that
 * this function can make several assumptions about the state of things
 * that allow for significant optimizations versus the standard function.
 * As a result we can do things like drop a frag and maintain an accurate
 * truesize for the skb.
 */
static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring,
                              struct sk_buff *skb)
{
        struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
        unsigned char *va;
        unsigned int pull_len;

        /* it is valid to use page_address instead of kmap since we are
         * working with pages allocated out of the low mem pool per
         * alloc_page(GFP_ATOMIC)
         */
        va = skb_frag_address(frag);

        /* we need the header to contain the greater of either ETH_HLEN or
         * 60 bytes if the skb->len is less than 60 for skb_pad.
         */
        pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);

        /* align pull length to size of long to optimize memcpy performance */
        skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));

        /* update all of the pointers */
        skb_frag_size_sub(frag, pull_len);
        frag->page_offset += pull_len;
        skb->data_len -= pull_len;
        skb->tail += pull_len;
}

/* ixgbevf_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx Descriptors right.  These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 */
static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
                                    union ixgbe_adv_rx_desc *rx_desc,
                                    struct sk_buff *skb)
{
        /* verify that the packet does not have any known errors */
        if (unlikely(ixgbevf_test_staterr(rx_desc,
                                          IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
                struct net_device *netdev = rx_ring->netdev;

                if (!(netdev->features & NETIF_F_RXALL)) {
                        dev_kfree_skb_any(skb);
                        return true;
                }
        }

        /* place header in linear portion of buffer */
        if (skb_is_nonlinear(skb))
                ixgbevf_pull_tail(rx_ring, skb);

        /* if skb_pad returns an error the skb was freed */
        if (unlikely(skb->len < 60)) {
                int pad_len = 60 - skb->len;

                if (skb_pad(skb, pad_len))
                        return true;
                __skb_put(skb, pad_len);
        }

        return false;
}

/* ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
                                  struct ixgbevf_rx_buffer *old_buff)
{
        struct ixgbevf_rx_buffer *new_buff;
        u16 nta = rx_ring->next_to_alloc;

        new_buff = &rx_ring->rx_buffer_info[nta];

        /* update, and store next to alloc */
        nta++;
        rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

        /* transfer page from old buffer to new buffer */
        new_buff->page = old_buff->page;
        new_buff->dma = old_buff->dma;
        new_buff->page_offset = old_buff->page_offset;

        /* sync the buffer for use by the device */
        dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
                                         new_buff->page_offset,
                                         IXGBEVF_RX_BUFSZ,
                                         DMA_FROM_DEVICE);
}

static inline bool ixgbevf_page_is_reserved(struct page *page)
{
        return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
}

/* ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the adapter.
 */
static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
                                struct ixgbevf_rx_buffer *rx_buffer,
                                union ixgbe_adv_rx_desc *rx_desc,
                                struct sk_buff *skb)
{
        struct page *page = rx_buffer->page;
        unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
        unsigned int truesize = IXGBEVF_RX_BUFSZ;
#else
        unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
#endif

        if ((size <= IXGBEVF_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
                unsigned char *va = page_address(page) + rx_buffer->page_offset;

                memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

                /* page is not reserved, we can reuse buffer as is */
                if (likely(!ixgbevf_page_is_reserved(page)))
                        return true;

                /* this page cannot be reused so discard it */
                put_page(page);
                return false;
        }

        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
                        rx_buffer->page_offset, size, truesize);

        /* avoid re-using remote pages */
        if (unlikely(ixgbevf_page_is_reserved(page)))
                return false;

#if (PAGE_SIZE < 8192)
        /* if we are only owner of page we can reuse it */
        if (unlikely(page_count(page) != 1))
                return false;

        /* flip page offset to other buffer */
        rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;

#else
        /* move offset up to the next cache line */
        rx_buffer->page_offset += truesize;

        if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
                return false;

#endif
        /* Even if we own the page, we are not allowed to use atomic_set()
         * This would break get_page_unless_zero() users.
         */
        atomic_inc(&page->_count);

        return true;
}
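
/* Worked example (illustrative; not part of the original driver
 * source): with a 4 KiB PAGE_SIZE and IXGBEVF_RX_BUFSZ = 2 KiB, the
 * XOR above ping-pongs page_offset between 0x000 and 0x800, so each
 * page serves two receive buffers before the page_count() check
 * decides whether it can keep being recycled.
 */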

static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
                                               union ixgbe_adv_rx_desc *rx_desc,
                                               struct sk_buff *skb)
{
        struct ixgbevf_rx_buffer *rx_buffer;
        struct page *page;

        rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
        page = rx_buffer->page;
        prefetchw(page);

        if (likely(!skb)) {
                void *page_addr = page_address(page) +
                                  rx_buffer->page_offset;

                /* prefetch first cache line of first page */
                prefetch(page_addr);
#if L1_CACHE_BYTES < 128
                prefetch(page_addr + L1_CACHE_BYTES);
#endif

                /* allocate a skb to store the frags */
                skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
                                                IXGBEVF_RX_HDR_SIZE);
                if (unlikely(!skb)) {
                        rx_ring->rx_stats.alloc_rx_buff_failed++;
                        return NULL;
                }

                /* we will be copying header into skb->data in
                 * pskb_may_pull so it is in our interest to prefetch
                 * it now to avoid a possible cache miss
                 */
                prefetchw(skb->data);
        }

        /* we are reusing so sync this buffer for CPU use */
        dma_sync_single_range_for_cpu(rx_ring->dev,
                                      rx_buffer->dma,
                                      rx_buffer->page_offset,
                                      IXGBEVF_RX_BUFSZ,
                                      DMA_FROM_DEVICE);

        /* pull page into skb */
        if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
                /* hand second half of page back to the ring */
                ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
        } else {
                /* we are not reusing the buffer so unmap it */
                dma_unmap_page(rx_ring->dev, rx_buffer->dma,
                               PAGE_SIZE, DMA_FROM_DEVICE);
        }

        /* clear contents of buffer_info */
        rx_buffer->dma = 0;
        rx_buffer->page = NULL;

        return skb;
}

static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
                                             u32 qmask)
{
        struct ixgbe_hw *hw = &adapter->hw;

        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}

static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
                                struct ixgbevf_ring *rx_ring,
                                int budget)
{
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
        u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
        struct sk_buff *skb = rx_ring->skb;

        while (likely(total_rx_packets < budget)) {
                union ixgbe_adv_rx_desc *rx_desc;

                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
                        ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
                        cleaned_count = 0;
                }

                rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);

                if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
                        break;

                /* This memory barrier is needed to keep us from reading
                 * any other fields out of the rx_desc until we know the
                 * RXD_STAT_DD bit is set
                 */
                rmb();

                /* retrieve a buffer from the ring */
                skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb);

                /* exit if we failed to retrieve a buffer */
                if (!skb)
                        break;

                cleaned_count++;

                /* fetch next buffer in frame if non-eop */
                if (ixgbevf_is_non_eop(rx_ring, rx_desc))
                        continue;

                /* verify the packet layout is correct */
                if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
                        skb = NULL;
                        continue;
                }

                /* probably a little skewed due to removing CRC */
                total_rx_bytes += skb->len;

                /* Workaround hardware that can't do proper VEPA multicast
                 * source pruning.
                 */
                if ((skb->pkt_type == PACKET_BROADCAST ||
                     skb->pkt_type == PACKET_MULTICAST) &&
                    ether_addr_equal(rx_ring->netdev->dev_addr,
                                     eth_hdr(skb)->h_source)) {
                        dev_kfree_skb_irq(skb);
                        continue;
                }

                /* populate checksum, VLAN, and protocol */
                ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);

                ixgbevf_rx_skb(q_vector, skb);

                /* reset skb pointer */
                skb = NULL;

                /* update budget accounting */
                total_rx_packets++;
        }

        /* place incomplete frames back on ring for completion */
        rx_ring->skb = skb;

        u64_stats_update_begin(&rx_ring->syncp);
        rx_ring->stats.packets += total_rx_packets;
        rx_ring->stats.bytes += total_rx_bytes;
        u64_stats_update_end(&rx_ring->syncp);
        q_vector->rx.total_packets += total_rx_packets;
        q_vector->rx.total_bytes += total_rx_bytes;

        return total_rx_packets;
}

/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our device's info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
        struct ixgbevf_q_vector *q_vector =
                container_of(napi, struct ixgbevf_q_vector, napi);
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct ixgbevf_ring *ring;
        int per_ring_budget;
        bool clean_complete = true;

        ixgbevf_for_each_ring(ring, q_vector->tx)
                clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

#ifdef CONFIG_NET_RX_BUSY_POLL
        if (!ixgbevf_qv_lock_napi(q_vector))
                return budget;
#endif

        /* attempt to distribute budget to each queue fairly, but don't allow
         * the budget to go below 1 because we'll exit polling
         */
        if (q_vector->rx.count > 1)
                per_ring_budget = max(budget/q_vector->rx.count, 1);
        else
                per_ring_budget = budget;

        ixgbevf_for_each_ring(ring, q_vector->rx)
                clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
                                                        per_ring_budget)
                                   < per_ring_budget);

#ifdef CONFIG_NET_RX_BUSY_POLL
        ixgbevf_qv_unlock_napi(q_vector);
#endif

        /* If all work not completed, return budget and keep polling */
        if (!clean_complete)
                return budget;
        /* all work done, exit the polling mode */
        napi_complete(napi);
        if (adapter->rx_itr_setting & 1)
                ixgbevf_set_itr(q_vector);
        if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
            !test_bit(__IXGBEVF_REMOVING, &adapter->state))
                ixgbevf_irq_enable_queues(adapter,
                                          1 << q_vector->v_idx);

        return 0;
}
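
/* Worked example (illustrative; not part of the original driver
 * source): with a NAPI budget of 64 and three Rx rings on one vector,
 * per_ring_budget = max(64 / 3, 1) = 21; the vector only leaves
 * polling mode once every ring cleans fewer packets than that share.
 */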

/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 */
void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct ixgbe_hw *hw = &adapter->hw;
        int v_idx = q_vector->v_idx;
        u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

        /* set the WDIS bit to not clear the timer bits and cause an
         * immediate assertion of the interrupt
         */
        itr_reg |= IXGBE_EITR_CNT_WDIS;

        IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}
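
/* Worked example (illustrative; not part of the original driver
 * source): q_vector->itr holds the interval in quarter-microsecond
 * units, so IXGBE_20K_ITR (200) means 200 >> 2 = 50 usec between
 * interrupts, i.e. roughly 20,000 interrupts per second once written
 * to VTEITR.
 */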

#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
{
        struct ixgbevf_q_vector *q_vector =
                container_of(napi, struct ixgbevf_q_vector, napi);
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct ixgbevf_ring *ring;
        int found = 0;

        if (test_bit(__IXGBEVF_DOWN, &adapter->state))
                return LL_FLUSH_FAILED;

        if (!ixgbevf_qv_lock_poll(q_vector))
                return LL_FLUSH_BUSY;

        ixgbevf_for_each_ring(ring, q_vector->rx) {
                found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
#ifdef BP_EXTENDED_STATS
                if (found)
                        ring->stats.cleaned += found;
                else
                        ring->stats.misses++;
#endif
                if (found)
                        break;
        }

        ixgbevf_qv_unlock_poll(q_vector);

        return found;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */

/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
        struct ixgbevf_q_vector *q_vector;
        int q_vectors, v_idx;

        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
        adapter->eims_enable_mask = 0;

        /* Populate the IVAR table and set the ITR values to the
         * corresponding register.
         */
        for (v_idx = 0; v_idx < q_vectors; v_idx++) {
                struct ixgbevf_ring *ring;

                q_vector = adapter->q_vector[v_idx];

                ixgbevf_for_each_ring(ring, q_vector->rx)
                        ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

                ixgbevf_for_each_ring(ring, q_vector->tx)
                        ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

                if (q_vector->tx.ring && !q_vector->rx.ring) {
                        /* tx only vector */
                        if (adapter->tx_itr_setting == 1)
                                q_vector->itr = IXGBE_10K_ITR;
                        else
                                q_vector->itr = adapter->tx_itr_setting;
                } else {
                        /* rx or rx/tx vector */
                        if (adapter->rx_itr_setting == 1)
                                q_vector->itr = IXGBE_20K_ITR;
                        else
                                q_vector->itr = adapter->rx_itr_setting;
                }

                /* add q_vector eims value to global eims_enable_mask */
                adapter->eims_enable_mask |= 1 << v_idx;

                ixgbevf_write_eitr(q_vector);
        }

        ixgbevf_set_ivar(adapter, -1, 1, v_idx);
        /* setup eims_other and add value to global eims_enable_mask */
        adapter->eims_other = 1 << v_idx;
        adapter->eims_enable_mask |= adapter->eims_other;
}

enum latency_range {
        lowest_latency = 0,
        low_latency = 1,
        bulk_latency = 2,
        latency_invalid = 255
};
1066 * @q_vector: structure containing interrupt and ring information
1067 * @ring_container: structure containing ring performance data
1069 * Stores a new ITR value based on packets and byte
1070 * counts during the last interrupt. The advantage of per interrupt
1071 * computation is faster updates and more accurate ITR for the current
1072 * traffic pattern. Constants in this function were computed
1073 * based on theoretical maximum wire speed and thresholds were set based
1074 * on testing data as well as attempting to minimize response time
1075 * while increasing bulk throughput.
1077 static void ixgbevf_update_itr(struct ixgbevf_q_vector
*q_vector
,
1078 struct ixgbevf_ring_container
*ring_container
)
1080 int bytes
= ring_container
->total_bytes
;
1081 int packets
= ring_container
->total_packets
;
1084 u8 itr_setting
= ring_container
->itr
;
1089 /* simple throttlerate management
1090 * 0-20MB/s lowest (100000 ints/s)
1091 * 20-100MB/s low (20000 ints/s)
1092 * 100-1249MB/s bulk (8000 ints/s)
1094 /* what was last interrupt timeslice? */
1095 timepassed_us
= q_vector
->itr
>> 2;
1096 bytes_perint
= bytes
/ timepassed_us
; /* bytes/usec */
1098 switch (itr_setting
) {
1099 case lowest_latency
:
1100 if (bytes_perint
> 10)
1101 itr_setting
= low_latency
;
1104 if (bytes_perint
> 20)
1105 itr_setting
= bulk_latency
;
1106 else if (bytes_perint
<= 10)
1107 itr_setting
= lowest_latency
;
1110 if (bytes_perint
<= 20)
1111 itr_setting
= low_latency
;
1115 /* clear work counters since we have the values we need */
1116 ring_container
->total_bytes
= 0;
1117 ring_container
->total_packets
= 0;
1119 /* write updated itr to ring container */
1120 ring_container
->itr
= itr_setting
;
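
/* Worked example (illustrative; not part of the original driver
 * source): at the 20K setting the last timeslice was 50 usec; a vector
 * that moved 5,000 bytes in that window scores
 * bytes_perint = 5000 / 50 = 100, which exceeds the "> 20" threshold
 * and promotes low_latency to bulk_latency on the next pass.
 */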

static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
        u32 new_itr = q_vector->itr;
        u8 current_itr;

        ixgbevf_update_itr(q_vector, &q_vector->tx);
        ixgbevf_update_itr(q_vector, &q_vector->rx);

        current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

        switch (current_itr) {
        /* counts and packets in update_itr are dependent on these numbers */
        case lowest_latency:
                new_itr = IXGBE_100K_ITR;
                break;
        case low_latency:
                new_itr = IXGBE_20K_ITR;
                break;
        case bulk_latency:
        default:
                new_itr = IXGBE_8K_ITR;
                break;
        }

        if (new_itr != q_vector->itr) {
                /* do an exponential smoothing */
                new_itr = (10 * new_itr * q_vector->itr) /
                          ((9 * new_itr) + q_vector->itr);

                /* save the algorithm value here */
                q_vector->itr = new_itr;

                ixgbevf_write_eitr(q_vector);
        }
}
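
/* Worked example (illustrative; not part of the original driver
 * source): moving from IXGBE_20K_ITR (200) toward IXGBE_8K_ITR (500),
 * the smoothing above yields
 * (10 * 500 * 200) / ((9 * 500) + 200) = 1000000 / 4700, about 212,
 * so the interval creeps toward the target instead of jumping there.
 */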

static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
        struct ixgbevf_adapter *adapter = data;
        struct ixgbe_hw *hw = &adapter->hw;

        hw->mac.get_link_status = 1;

        if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
            !test_bit(__IXGBEVF_REMOVING, &adapter->state))
                mod_timer(&adapter->watchdog_timer, jiffies);

        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

        return IRQ_HANDLED;
}

/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
        struct ixgbevf_q_vector *q_vector = data;

        /* EIAM disabled interrupts (on this vector) for us */
        if (q_vector->rx.ring || q_vector->tx.ring)
                napi_schedule(&q_vector->napi);

        return IRQ_HANDLED;
}

static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
                                     int r_idx)
{
        struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

        a->rx_ring[r_idx]->next = q_vector->rx.ring;
        q_vector->rx.ring = a->rx_ring[r_idx];
        q_vector->rx.count++;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
                                     int t_idx)
{
        struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

        a->tx_ring[t_idx]->next = q_vector->tx.ring;
        q_vector->tx.ring = a->tx_ring[t_idx];
        q_vector->tx.count++;
}

/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
        int q_vectors;
        int v_start = 0;
        int rxr_idx = 0, txr_idx = 0;
        int rxr_remaining = adapter->num_rx_queues;
        int txr_remaining = adapter->num_tx_queues;
        int i, j;
        int rqpv, tqpv;
        int err = 0;

        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        /* The ideal configuration...
         * We have enough vectors to map one per queue.
         */
        if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
                for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
                        map_vector_to_rxq(adapter, v_start, rxr_idx);

                for (; txr_idx < txr_remaining; v_start++, txr_idx++)
                        map_vector_to_txq(adapter, v_start, txr_idx);
                goto out;
        }

        /* If we don't have enough vectors for a 1-to-1
         * mapping, we'll have to group them so there are
         * multiple queues per vector.
         */
        /* Re-adjusting *qpv takes care of the remainder. */
        for (i = v_start; i < q_vectors; i++) {
                rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
                for (j = 0; j < rqpv; j++) {
                        map_vector_to_rxq(adapter, i, rxr_idx);
                        rxr_idx++;
                        rxr_remaining--;
                }
        }
        for (i = v_start; i < q_vectors; i++) {
                tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
                for (j = 0; j < tqpv; j++) {
                        map_vector_to_txq(adapter, i, txr_idx);
                        txr_idx++;
                        txr_remaining--;
                }
        }

out:
        return err;
}
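
/* Worked example (illustrative; not part of the original driver
 * source): with four Rx queues but only two queue vectors, the first
 * loop iteration assigns DIV_ROUND_UP(4, 2) = 2 queues to vector 0 and
 * the second assigns DIV_ROUND_UP(2, 1) = 2 to vector 1; recomputing
 * rqpv each iteration absorbs any remainder automatically.
 */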

/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
        int vector, err;
        int ri = 0, ti = 0;

        for (vector = 0; vector < q_vectors; vector++) {
                struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
                struct msix_entry *entry = &adapter->msix_entries[vector];

                if (q_vector->tx.ring && q_vector->rx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name) - 1,
                                 "%s-%s-%d", netdev->name, "TxRx", ri++);
                        ti++;
                } else if (q_vector->rx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name) - 1,
                                 "%s-%s-%d", netdev->name, "rx", ri++);
                } else if (q_vector->tx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name) - 1,
                                 "%s-%s-%d", netdev->name, "tx", ti++);
                } else {
                        /* skip this unused q_vector */
                        continue;
                }
                err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
                                  q_vector->name, q_vector);
                if (err) {
                        hw_dbg(&adapter->hw,
                               "request_irq failed for MSIX interrupt Error: %d\n",
                               err);
                        goto free_queue_irqs;
                }
        }

        err = request_irq(adapter->msix_entries[vector].vector,
                          &ixgbevf_msix_other, 0, netdev->name, adapter);
        if (err) {
                hw_dbg(&adapter->hw,
                       "request_irq for msix_other failed: %d\n", err);
                goto free_queue_irqs;
        }

        return 0;

free_queue_irqs:
        while (vector) {
                vector--;
                free_irq(adapter->msix_entries[vector].vector,
                         adapter->q_vector[vector]);
        }
        /* This failure is non-recoverable - it indicates the system is
         * out of MSIX vector resources and the VF driver cannot run
         * without them.  Set the number of msix vectors to zero
         * indicating that not enough can be allocated.  The error
         * will be returned to the user indicating device open failed.
         * Any further attempts to force the driver to open will also
         * fail.  The only way to recover is to unload the driver and
         * reload it again.  If the system has recovered some MSIX
         * vectors then it may succeed.
         */
        adapter->num_msix_vectors = 0;
        return err;
}

static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
        int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        for (i = 0; i < q_vectors; i++) {
                struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];

                q_vector->rx.ring = NULL;
                q_vector->tx.ring = NULL;
                q_vector->rx.count = 0;
                q_vector->tx.count = 0;
        }
}

/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
        int err = 0;

        err = ixgbevf_request_msix_irqs(adapter);

        if (err)
                hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err);

        return err;
}

static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
        int i, q_vectors;

        q_vectors = adapter->num_msix_vectors;
        i = q_vectors - 1;

        free_irq(adapter->msix_entries[i].vector, adapter);
        i--;

        for (; i >= 0; i--) {
                /* free only the irqs that were actually requested */
                if (!adapter->q_vector[i]->rx.ring &&
                    !adapter->q_vector[i]->tx.ring)
                        continue;

                free_irq(adapter->msix_entries[i].vector,
                         adapter->q_vector[i]);
        }

        ixgbevf_reset_q_vectors(adapter);
}

/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int i;

        IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
        IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);

        IXGBE_WRITE_FLUSH(hw);

        for (i = 0; i < adapter->num_msix_vectors; i++)
                synchronize_irq(adapter->msix_entries[i].vector);
}

/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
        IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}

/**
 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
                                      struct ixgbevf_ring *ring)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u64 tdba = ring->dma;
        int wait_loop = 10;
        u32 txdctl = IXGBE_TXDCTL_ENABLE;
        u8 reg_idx = ring->reg_idx;

        /* disable queue to avoid issues while updating state */
        IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
        IXGBE_WRITE_FLUSH(hw);

        IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
        IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
        IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
                        ring->count * sizeof(union ixgbe_adv_tx_desc));

        /* disable head writeback */
        IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
        IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);

        /* enable relaxed ordering */
        IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
                        (IXGBE_DCA_TXCTRL_DESC_RRO_EN |
                         IXGBE_DCA_TXCTRL_DATA_RRO_EN));

        /* reset head and tail pointers */
        IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
        IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
        ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);

        /* reset ntu and ntc to place SW in sync with hardware */
        ring->next_to_clean = 0;
        ring->next_to_use = 0;

        /* In order to avoid issues WTHRESH + PTHRESH should always be equal
         * to or less than the number of on chip descriptors, which is
         * currently 40.
         */
        txdctl |= (8 << 16);    /* WTHRESH = 8 */

        /* Setting PTHRESH to 32 both improves performance */
        txdctl |= (1 << 8) |    /* HTHRESH = 1 */
                   32;          /* PTHRESH = 32 */

        IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);

        /* poll to verify queue is enabled */
        do {
                usleep_range(1000, 2000);
                txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
        }  while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
        if (!wait_loop)
                pr_err("Could not enable Tx Queue %d\n", reg_idx);
}
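
/* Worked example (illustrative; not part of the original driver
 * source): the TXDCTL value written above is IXGBE_TXDCTL_ENABLE |
 * (8 << 16) | (1 << 8) | 32, i.e. WTHRESH = 8, HTHRESH = 1 and
 * PTHRESH = 32, keeping WTHRESH + PTHRESH at the 40 on-chip descriptor
 * limit noted in the comment.
 */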

/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
        u32 i;

        /* Setup the HW Tx Head and Tail descriptor pointers */
        for (i = 0; i < adapter->num_tx_queues; i++)
                ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32 srrctl;

        srrctl = IXGBE_SRRCTL_DROP_EN;

        srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
        srrctl |= IXGBEVF_RX_BUFSZ >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
        srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

        IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}
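
/* Worked example (illustrative; not part of the original driver
 * source): assuming IXGBEVF_RX_BUFSZ = 2048 and
 * IXGBE_SRRCTL_BSIZEPKT_SHIFT = 10, the packet buffer size field is
 * programmed as 2048 >> 10 = 2 (1 KiB units), matching the half-page
 * receive buffers the Rx path allocates.
 */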

static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        /* PSRTYPE must be initialized in 82599 */
        u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
                      IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
                      IXGBE_PSRTYPE_L2HDR;

        if (adapter->num_rx_queues > 1)
                psrtype |= 1 << 29;

        IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
}

#define IXGBEVF_MAX_RX_DESC_POLL 10
static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
                                     struct ixgbevf_ring *ring)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
        u32 rxdctl;
        u8 reg_idx = ring->reg_idx;

        if (IXGBE_REMOVED(hw->hw_addr))
                return;
        rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
        rxdctl &= ~IXGBE_RXDCTL_ENABLE;

        /* write value back with RXDCTL.ENABLE bit cleared */
        IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

        /* the hardware may take up to 100us to really disable the rx queue */
        do {
                udelay(10);
                rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
        } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));

        if (!wait_loop)
                pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
                       reg_idx);
}

static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
                                         struct ixgbevf_ring *ring)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
        u32 rxdctl;
        u8 reg_idx = ring->reg_idx;

        if (IXGBE_REMOVED(hw->hw_addr))
                return;
        do {
                usleep_range(1000, 2000);
                rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
        } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));

        if (!wait_loop)
                pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
                       reg_idx);
}

static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
                                      struct ixgbevf_ring *ring)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u64 rdba = ring->dma;
        u32 rxdctl;
        u8 reg_idx = ring->reg_idx;

        /* disable queue to avoid issues while updating state */
        rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
        ixgbevf_disable_rx_queue(adapter, ring);

        IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
        IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
        IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
                        ring->count * sizeof(union ixgbe_adv_rx_desc));

        /* enable relaxed ordering */
        IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
                        IXGBE_DCA_RXCTRL_DESC_RRO_EN);

        /* reset head and tail pointers */
        IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
        IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
        ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);

        /* reset ntu and ntc to place SW in sync with hardware */
        ring->next_to_clean = 0;
        ring->next_to_use = 0;
        ring->next_to_alloc = 0;

        ixgbevf_configure_srrctl(adapter, reg_idx);

        /* allow any size packet since we can handle overflow */
        rxdctl &= ~IXGBE_RXDCTL_RLPML_EN;

        rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
        IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

        ixgbevf_rx_desc_queue_enable(adapter, ring);
        ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
}

/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
        int i;
        struct ixgbe_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;

        ixgbevf_setup_psrtype(adapter);

        /* notify the PF of our intent to use this size of frame */
        ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);

        /* Setup the HW Rx Head and Tail Descriptor Pointers and
         * the Base and Length of the Rx Descriptor Ring
         */
        for (i = 0; i < adapter->num_rx_queues; i++)
                ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
}

static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
                                   __be16 proto, u16 vid)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        int err;

        spin_lock_bh(&adapter->mbx_lock);

        /* add VID to filter table */
        err = hw->mac.ops.set_vfta(hw, vid, 0, true);

        spin_unlock_bh(&adapter->mbx_lock);

        /* translate error return types so error makes sense */
        if (err == IXGBE_ERR_MBX)
                return -EIO;

        if (err == IXGBE_ERR_INVALID_ARGUMENT)
                return -EACCES;

        set_bit(vid, adapter->active_vlans);

        return err;
}

static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
                                    __be16 proto, u16 vid)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        int err = -EOPNOTSUPP;

        spin_lock_bh(&adapter->mbx_lock);

        /* remove VID from filter table */
        err = hw->mac.ops.set_vfta(hw, vid, 0, false);

        spin_unlock_bh(&adapter->mbx_lock);

        clear_bit(vid, adapter->active_vlans);

        return err;
}

static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
        u16 vid;

        for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
                ixgbevf_vlan_rx_add_vid(adapter->netdev,
                                        htons(ETH_P_8021Q), vid);
}

static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        int count = 0;

        if ((netdev_uc_count(netdev)) > 10) {
                pr_err("Too many unicast filters - No Space\n");
                return -ENOSPC;
        }

        if (!netdev_uc_empty(netdev)) {
                struct netdev_hw_addr *ha;

                netdev_for_each_uc_addr(ha, netdev) {
                        hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
                        udelay(200);
                }
        } else {
                /* If the list is empty then send message to PF driver to
                 * clear all macvlans on this VF.
                 */
                hw->mac.ops.set_uc_addr(hw, 0, NULL);
        }

        return count;
}

/**
 * ixgbevf_set_rx_mode - Multicast and unicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list, unicast address list or the network interface flags are updated.
 * This routine is responsible for configuring the hardware for proper
 * multicast mode and configuring requested unicast filters.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;

        spin_lock_bh(&adapter->mbx_lock);

        /* reprogram multicast list */
        hw->mac.ops.update_mc_addr_list(hw, netdev);

        ixgbevf_write_uc_addr_list(netdev);

        spin_unlock_bh(&adapter->mbx_lock);
}

static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
        int q_idx;
        struct ixgbevf_q_vector *q_vector;
        int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                q_vector = adapter->q_vector[q_idx];
#ifdef CONFIG_NET_RX_BUSY_POLL
                ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
#endif
                napi_enable(&q_vector->napi);
        }
}

static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
        int q_idx;
        struct ixgbevf_q_vector *q_vector;
        int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                q_vector = adapter->q_vector[q_idx];
                napi_disable(&q_vector->napi);
#ifdef CONFIG_NET_RX_BUSY_POLL
                while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
                        pr_info("QV %d locked\n", q_idx);
                        usleep_range(1000, 20000);
                }
#endif /* CONFIG_NET_RX_BUSY_POLL */
        }
}

static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        unsigned int def_q = 0;
        unsigned int num_tcs = 0;
        unsigned int num_rx_queues = 1;
        int err;

        spin_lock_bh(&adapter->mbx_lock);

        /* fetch queue configuration from the PF */
        err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

        spin_unlock_bh(&adapter->mbx_lock);

        if (err)
                return err;

        if (num_tcs > 1) {
                /* update default Tx ring register index */
                adapter->tx_ring[0]->reg_idx = def_q;

                /* we need as many queues as traffic classes */
                num_rx_queues = num_tcs;
        }

        /* if we have a bad config abort request queue reset */
        if (adapter->num_rx_queues != num_rx_queues) {
                /* force mailbox timeout to prevent further messages */
                hw->mbx.timeout = 0;

                /* wait for watchdog to come around and bail us out */
                adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
        }

        return 0;
}

static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
        ixgbevf_configure_dcb(adapter);

        ixgbevf_set_rx_mode(adapter->netdev);

        ixgbevf_restore_vlan(adapter);

        ixgbevf_configure_tx(adapter);
        ixgbevf_configure_rx(adapter);
}

static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
        /* Only save pre-reset stats if there are some */
        if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
                adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
                        adapter->stats.base_vfgprc;
                adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
                        adapter->stats.base_vfgptc;
                adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
                        adapter->stats.base_vfgorc;
                adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
                        adapter->stats.base_vfgotc;
                adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
                        adapter->stats.base_vfmprc;
        }
}

static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
        adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
        adapter->stats.last_vfgorc |=
                (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
        adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
        adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
        adapter->stats.last_vfgotc |=
                (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
        adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

        adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
        adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
        adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
        adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
        adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}

static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int api[] = { ixgbe_mbox_api_11,
                      ixgbe_mbox_api_10,
                      ixgbe_mbox_api_unknown };
        int err = 0, idx = 0;

        spin_lock_bh(&adapter->mbx_lock);

        while (api[idx] != ixgbe_mbox_api_unknown) {
                err = ixgbevf_negotiate_api_version(hw, api[idx]);
                if (!err)
                        break;
                idx++;
        }

        spin_unlock_bh(&adapter->mbx_lock);
}

static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;

        ixgbevf_configure_msix(adapter);

        spin_lock_bh(&adapter->mbx_lock);

        if (is_valid_ether_addr(hw->mac.addr))
                hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
        else
                hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);

        spin_unlock_bh(&adapter->mbx_lock);

        smp_mb__before_atomic();
        clear_bit(__IXGBEVF_DOWN, &adapter->state);
        ixgbevf_napi_enable_all(adapter);

        /* enable transmits */
        netif_tx_start_all_queues(netdev);

        ixgbevf_save_reset_stats(adapter);
        ixgbevf_init_last_counter_stats(adapter);

        hw->mac.get_link_status = 1;
        mod_timer(&adapter->watchdog_timer, jiffies);
}

void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        ixgbevf_configure(adapter);

        ixgbevf_up_complete(adapter);

        /* clear any pending interrupts, may auto mask */
        IXGBE_READ_REG(hw, IXGBE_VTEICR);

        ixgbevf_irq_enable(adapter);
}

/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
{
        struct device *dev = rx_ring->dev;
        unsigned long size;
        unsigned int i;

        /* Free Rx ring sk_buff */
        if (rx_ring->skb) {
                dev_kfree_skb(rx_ring->skb);
                rx_ring->skb = NULL;
        }

        /* ring already cleared, nothing to do */
        if (!rx_ring->rx_buffer_info)
                return;

        /* Free all the Rx ring pages */
        for (i = 0; i < rx_ring->count; i++) {
                struct ixgbevf_rx_buffer *rx_buffer;

                rx_buffer = &rx_ring->rx_buffer_info[i];
                if (rx_buffer->dma)
                        dma_unmap_page(dev, rx_buffer->dma,
                                       PAGE_SIZE, DMA_FROM_DEVICE);
                rx_buffer->dma = 0;
                if (rx_buffer->page)
                        __free_page(rx_buffer->page);
                rx_buffer->page = NULL;
        }

        size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
        memset(rx_ring->rx_buffer_info, 0, size);

        /* Zero out the descriptor ring */
        memset(rx_ring->desc, 0, rx_ring->size);
}

/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
{
        struct ixgbevf_tx_buffer *tx_buffer_info;
        unsigned long size;
        unsigned int i;

        if (!tx_ring->tx_buffer_info)
                return;

        /* Free all the Tx ring sk_buffs */
        for (i = 0; i < tx_ring->count; i++) {
                tx_buffer_info = &tx_ring->tx_buffer_info[i];
                ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
        }

        size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
        memset(tx_ring->tx_buffer_info, 0, size);

        memset(tx_ring->desc, 0, tx_ring->size);
}

/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_rx_queues; i++)
                ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_tx_queues; i++)
                ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
}

void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	/* signal that we are down to the interrupt handler */
	if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
		return; /* do nothing if already down */

	/* disable all enabled rx queues */
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);

	netif_tx_disable(netdev);

	msleep(10);

	netif_tx_stop_all_queues(netdev);

	ixgbevf_irq_disable(adapter);

	ixgbevf_napi_disable_all(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	/* can't call flush scheduled work here because it can deadlock
	 * if linkwatch_event tries to acquire the rtnl_lock which we are
	 * holding */
	while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
		msleep(1);

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		u8 reg_idx = adapter->tx_ring[i]->reg_idx;

		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
				IXGBE_TXDCTL_SWFLSH);
	}

	netif_carrier_off(netdev);

	if (!pci_channel_offline(adapter->pdev))
		ixgbevf_reset(adapter);

	ixgbevf_clean_all_tx_rings(adapter);
	ixgbevf_clean_all_rx_rings(adapter);
}

void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
	WARN_ON(in_interrupt());

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		msleep(1);

	ixgbevf_down(adapter);
	ixgbevf_up(adapter);

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}

void ixgbevf_reset(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	if (hw->mac.ops.reset_hw(hw)) {
		hw_dbg(hw, "PF still resetting\n");
	} else {
		hw->mac.ops.init_hw(hw);
		ixgbevf_negotiate_api(adapter);
	}

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}
}

static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
					int vectors)
{
	int vector_threshold;

	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					vector_threshold, vectors);

	if (vectors < 0) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		return vectors;
	}

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
	 * vectors we were allocated.
	 */
	adapter->num_msix_vectors = vectors;

	return 0;
}
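
/* pci_enable_msix_range() either allocates between vector_threshold and
 * the requested count of vectors (returning how many it actually got) or
 * fails with a negative errno; there is no partial success below the
 * minimum.  An illustrative use of the same call, with hypothetical
 * pdev/entries variables:
 *
 *	int got = pci_enable_msix_range(pdev, entries, 2, 8);
 *
 *	if (got < 0)
 *		return got;	// not even 2 vectors were available
 *	// got is somewhere in [2, 8]; size the queue layout to it
 */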

/**
 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 **/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	int err;

	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return;

	/* we need as many queues as traffic classes */
	if (num_tcs > 1)
		adapter->num_rx_queues = num_tcs;
}

/**
 * ixgbevf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_ring *ring;
	int rx = 0, tx = 0;

	for (; tx < adapter->num_tx_queues; tx++) {
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_allocation;

		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = tx;
		ring->reg_idx = tx;

		adapter->tx_ring[tx] = ring;
	}

	for (; rx < adapter->num_rx_queues; rx++) {
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_allocation;

		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		ring->count = adapter->rx_ring_count;
		ring->queue_index = rx;
		ring->reg_idx = rx;

		adapter->rx_ring[rx] = ring;
	}

	return 0;

err_allocation:
	while (tx) {
		kfree(adapter->tx_ring[--tx]);
		adapter->tx_ring[tx] = NULL;
	}

	while (rx) {
		kfree(adapter->rx_ring[--rx]);
		adapter->rx_ring[rx] = NULL;
	}
	return -ENOMEM;
}

/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0;
	int vector, v_budget;

	/* It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPU's.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
	v_budget = min_t(int, v_budget, num_online_cpus());
	v_budget += NON_Q_VECTORS;

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
	if (err)
		goto out;

	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
	if (err)
		goto out;

	err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);

out:
	return err;
}
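
/* The vector budget above is: enough vectors for the busier of Rx/Tx,
 * capped at the number of online CPUs, plus the non-queue (mailbox/link)
 * vectors.  A worked example, assuming 4 Rx queues, 2 Tx queues, 8 online
 * CPUs and NON_Q_VECTORS == 1:
 *
 *	v_budget = max(4, 2);			// 4
 *	v_budget = min_t(int, v_budget, 8);	// still 4
 *	v_budget += 1;				// request 5 MSI-X vectors
 */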

/**
 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	struct ixgbevf_q_vector *q_vector;

	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->v_idx = q_idx;
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       ixgbevf_poll, 64);
#ifdef CONFIG_NET_RX_BUSY_POLL
		napi_hash_add(&q_vector->napi);
#endif
		adapter->q_vector[q_idx] = q_vector;
	}

	return 0;

err_out:
	while (q_idx) {
		q_idx--;
		q_vector = adapter->q_vector[q_idx];
#ifdef CONFIG_NET_RX_BUSY_POLL
		napi_hash_del(&q_vector->napi);
#endif
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
		adapter->q_vector[q_idx] = NULL;
	}
	return -ENOMEM;
}

/**
 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];

		adapter->q_vector[q_idx] = NULL;
#ifdef CONFIG_NET_RX_BUSY_POLL
		napi_hash_del(&q_vector->napi);
#endif
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
}

/**
 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 **/
static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}

/**
 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 **/
static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbevf_set_num_queues(adapter);

	err = ixgbevf_set_interrupt_capability(adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbevf_alloc_q_vectors(adapter);
	if (err) {
		hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
		       "vectors\n");
		goto err_alloc_q_vectors;
	}

	err = ixgbevf_alloc_queues(adapter);
	if (err) {
		pr_err("Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
	       "Tx Queue count = %u\n",
	       (adapter->num_rx_queues > 1) ? "Enabled" :
	       "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	return 0;
err_alloc_queues:
	ixgbevf_free_q_vectors(adapter);
err_alloc_q_vectors:
	ixgbevf_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}

/**
 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbevf_free_q_vectors(adapter);
	ixgbevf_reset_interrupt_capability(adapter);
}

/**
 * ixgbevf_sw_init - Initialize general software structures
 * (struct ixgbevf_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbevf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	int err;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	hw->mbx.ops.init_params(hw);

	/* assume legacy case in which PF would only give VF 2 queues */
	hw->mac.max_tx_queues = 2;
	hw->mac.max_rx_queues = 2;

	/* lock to protect mailbox accesses */
	spin_lock_init(&adapter->mbx_lock);

	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_info(&pdev->dev,
			 "PF still in reset state.  Is the PF interface up?\n");
	} else {
		err = hw->mac.ops.init_hw(hw);
		if (err) {
			pr_err("init_shared_code failed: %d\n", err);
			goto out;
		}
		ixgbevf_negotiate_api(adapter);
		err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
		if (err)
			dev_info(&pdev->dev, "Error reading MAC address\n");
		else if (is_zero_ether_addr(adapter->hw.mac.addr))
			dev_info(&pdev->dev,
				 "MAC address not assigned by administrator.\n");
		memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	}

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_info(&pdev->dev, "Assigning random MAC address\n");
		eth_hw_addr_random(netdev);
		memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
	}

	/* Enable dynamic interrupt throttling rates */
	adapter->rx_itr_setting = 1;
	adapter->tx_itr_setting = 1;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;

	set_bit(__IXGBEVF_DOWN, &adapter->state);
	return 0;

out:
	return err;
}

#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
	{							\
		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
		if (current_counter < last_counter)		\
			counter += 0x100000000LL;		\
		last_counter = current_counter;			\
		counter &= 0xFFFFFFFF00000000LL;		\
		counter |= current_counter;			\
	}

#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{								 \
		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
		u64 current_counter = (current_counter_msb << 32) |	 \
			current_counter_lsb;				 \
		if (current_counter < last_counter)			 \
			counter += 0x1000000000LL;			 \
		last_counter = current_counter;				 \
		counter &= 0xFFFFFFF000000000LL;			 \
		counter |= current_counter;				 \
	}
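
/* Both macros splice a narrow, free-running hardware counter into a
 * 64-bit software accumulator: if the new reading is smaller than the
 * previous one the hardware wrapped, so the high-order "epoch" bits are
 * bumped by 2^32 (or 2^36) before the fresh low bits are OR-ed back in.
 * A worked 32-bit example, with last_counter = 0xFFFFFFF0 and a running
 * counter of 0x1FFFFFFF0, when the new reading is 0x10:
 *
 *	0x10 < 0xFFFFFFF0, so counter += 0x100000000;	// -> 0x2FFFFFFF0
 *	counter &= 0xFFFFFFFF00000000;			// -> 0x200000000
 *	counter |= 0x10;				// -> 0x200000010
 */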

/**
 * ixgbevf_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (!adapter->link_up)
		return;

	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
				adapter->stats.vfgprc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
				adapter->stats.vfgptc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
				adapter->stats.last_vfgorc,
				adapter->stats.vfgorc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
				adapter->stats.last_vfgotc,
				adapter->stats.vfgotc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
				adapter->stats.vfmprc);

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->hw_csum_rx_error +=
			adapter->rx_ring[i]->hw_csum_rx_error;
		adapter->rx_ring[i]->hw_csum_rx_error = 0;
	}
}

/**
 * ixgbevf_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbevf_watchdog(unsigned long data)
{
	struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eics = 0;
	int i;

	/*
	 * Do the watchdog outside of interrupt context due to the lovely
	 * delays that some of the newer hardware requires
	 */

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		goto watchdog_short_circuit;

	/* get one bit for every active tx/rx interrupt vector */
	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
		struct ixgbevf_q_vector *qv = adapter->q_vector[i];
		if (qv->rx.ring || qv->tx.ring)
			eics |= 1 << i;
	}

	IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);

watchdog_short_circuit:
	schedule_work(&adapter->watchdog_task);
}
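
/* The watchdog kicks every queue vector with a single register write:
 * each q_vector that owns at least one ring contributes bit (1 << i) to
 * eics, and writing that mask to VTEICS fires a software interrupt per
 * set bit.  For example, with vectors 0 and 2 active but vector 1
 * ringless:
 *
 *	eics = 0;
 *	eics |= 1 << 0;		// vector 0 has rings
 *	eics |= 1 << 2;		// vector 2 has rings
 *	IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);	// eics == 0x5
 */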

/**
 * ixgbevf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbevf_tx_timeout(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->reset_task);
}

static void ixgbevf_reset_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter;
	adapter = container_of(work, struct ixgbevf_adapter, reset_task);

	/* If we're already down or resetting, just bail */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	adapter->tx_timeout_count++;

	ixgbevf_reinit_locked(adapter);
}

/**
 * ixgbevf_watchdog_task - worker thread to bring link up
 * @work: pointer to work_struct containing our data
 **/
static void ixgbevf_watchdog_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter = container_of(work,
						       struct ixgbevf_adapter,
						       watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;
	s32 need_reset;

	if (IXGBE_REMOVED(hw->hw_addr)) {
		if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			rtnl_lock();
			ixgbevf_down(adapter);
			rtnl_unlock();
		}
		return;
	}
	ixgbevf_queue_reset_subtask(adapter);

	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;

	/*
	 * Always check the link on the watchdog because we have
	 * no LSC interrupt
	 */
	spin_lock_bh(&adapter->mbx_lock);

	need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);

	spin_unlock_bh(&adapter->mbx_lock);

	if (need_reset) {
		adapter->link_up = link_up;
		adapter->link_speed = link_speed;
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		schedule_work(&adapter->reset_task);
		goto pf_has_reset;
	}
	adapter->link_up = link_up;
	adapter->link_speed = link_speed;

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			char *link_speed_string;
			switch (link_speed) {
			case IXGBE_LINK_SPEED_10GB_FULL:
				link_speed_string = "10 Gbps";
				break;
			case IXGBE_LINK_SPEED_1GB_FULL:
				link_speed_string = "1 Gbps";
				break;
			case IXGBE_LINK_SPEED_100_FULL:
				link_speed_string = "100 Mbps";
				break;
			default:
				link_speed_string = "unknown speed";
				break;
			}
			dev_info(&adapter->pdev->dev,
				 "NIC Link is Up, %s\n", link_speed_string);
			netif_carrier_on(netdev);
			netif_tx_wake_all_queues(netdev);
		}
	} else {
		adapter->link_up = false;
		adapter->link_speed = 0;
		if (netif_carrier_ok(netdev)) {
			dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
	}

	ixgbevf_update_stats(adapter);

pf_has_reset:
	/* Reset the timer */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
	    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + (2 * HZ)));

	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
}

/**
 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
{
	ixgbevf_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i]->desc)
			ixgbevf_free_tx_resources(adapter->tx_ring[i]);
}

/**
 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
{
	int size;

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
	       "descriptor ring\n");
	return -ENOMEM;
}

/**
 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
{
	int size;

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	return 0;
err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
	return -ENOMEM;
}

/**
 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Rx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbevf_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
{
	ixgbevf_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i]->desc)
			ixgbevf_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * ixgbevf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbevf_open(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	/* A previous failure to open the device because of a lack of
	 * available MSIX vector resources may have reset the number
	 * of msix vectors variable to zero.  The only way to recover
	 * is to unload/reload the driver and hope that the system has
	 * been able to recover some MSIX vector resources.
	 */
	if (!adapter->num_msix_vectors)
		return -ENOMEM;

	/* disallow open during test */
	if (test_bit(__IXGBEVF_TESTING, &adapter->state))
		return -EBUSY;

	if (hw->adapter_stopped) {
		ixgbevf_reset(adapter);
		/* if adapter is still stopped then PF isn't up and
		 * the vf can't start. */
		if (hw->adapter_stopped) {
			err = IXGBE_ERR_MBX;
			pr_err("Unable to start - perhaps the PF Driver isn't "
			       "up yet\n");
			goto err_setup_reset;
		}
	}

	/* allocate transmit descriptors */
	err = ixgbevf_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbevf_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbevf_configure(adapter);

	/*
	 * Map the Tx/Rx rings to the vectors we were allotted.
	 * if request_irq will be called in this function map_rings
	 * must be called *before* up_complete
	 */
	ixgbevf_map_rings_to_vectors(adapter);

	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);
	err = ixgbevf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	ixgbevf_irq_enable(adapter);

	return 0;

err_req_irq:
	ixgbevf_down(adapter);
err_setup_rx:
	ixgbevf_free_all_rx_resources(adapter);
err_setup_tx:
	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_reset(adapter);

err_setup_reset:

	return err;
}

/**
 * ixgbevf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbevf_close(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ixgbevf_down(adapter);
	ixgbevf_free_irq(adapter);

	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_free_all_rx_resources(adapter);

	return 0;
}

static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;

	if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
		return;

	adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;

	/* if interface is down do nothing */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	/* Hardware has to reinitialize queues and interrupts to
	 * match packet buffer alignment. Unfortunately, the
	 * hardware is not flexible enough to do this dynamically.
	 */
	if (netif_running(dev))
		ixgbevf_close(dev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_init_interrupt_scheme(adapter);

	if (netif_running(dev))
		ixgbevf_open(dev);
}

static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
				u32 vlan_macip_lens, u32 type_tucmd,
				u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed	= 0;
	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
}
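
/* The context descriptor is written in place and next_to_use advances
 * with a compare instead of a modulo.  The wrap logic above is equivalent
 * to this sketch (hypothetical helper name):
 *
 *	static u16 advance_ring_index(u16 i, u16 count)
 *	{
 *		i++;
 *		return (i < count) ? i : 0;	// wrap to start of ring
 *	}
 */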

static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
		       struct ixgbevf_tx_buffer *first,
		       u8 *hdr_len)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens, type_tucmd;
	u32 mss_l4len_idx, l4len;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP, 0);
		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM |
				   IXGBE_TX_FLAGS_IPV4;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr,
				     0, IPPROTO_TCP, 0);
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM;
	}

	/* compute header lengths */
	l4len = tcp_hdrlen(skb);
	*hdr_len = skb_transport_offset(skb) + l4len;

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* mss_l4len_id: use 1 as index for TSO */
	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = skb_network_header_len(skb);
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    type_tucmd, mss_l4len_idx);

	return 1;
}
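
/* mss_l4len_idx packs three fields using the IXGBE_ADVTXD_* shifts: the
 * L4 header length, the MSS and the context-descriptor index (1 is used
 * for TSO here).  A worked example, assuming a 20-byte TCP header and a
 * 1448-byte MSS:
 *
 *	mss_l4len_idx  = 20 << IXGBE_ADVTXD_L4LEN_SHIFT;
 *	mss_l4len_idx |= 1448 << IXGBE_ADVTXD_MSS_SHIFT;
 *	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
 */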

static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
			    struct ixgbevf_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 mss_l4len_idx = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 l4_hdr = 0;
		switch (skb->protocol) {
		case htons(ETH_P_IP):
			vlan_macip_lens |= skb_network_header_len(skb);
			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
			l4_hdr = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			vlan_macip_lens |= skb_network_header_len(skb);
			l4_hdr = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but proto=%x!\n",
					 first->protocol);
			}
			break;
		}

		switch (l4_hdr) {
		case IPPROTO_TCP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
			mss_l4len_idx = tcp_hdrlen(skb) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_SCTP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			mss_l4len_idx = sizeof(struct sctphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_UDP:
			mss_l4len_idx = sizeof(struct udphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but l4 proto=%x!\n",
					 l4_hdr);
			}
			break;
		}

		/* update TX checksum flag */
		first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
	}

	/* vlan_macip_lens: MACLEN, VLAN tag */
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    type_tucmd, mss_l4len_idx);
}

static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	__le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
				      IXGBE_ADVTXD_DCMD_IFCS |
				      IXGBE_ADVTXD_DCMD_DEXT);

	/* set HW vlan bit if vlan is present */
	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);

	/* set segmentation enable bits for TSO/FSO */
	if (tx_flags & IXGBE_TX_FLAGS_TSO)
		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);

	return cmd_type;
}

static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
				     u32 tx_flags, unsigned int paylen)
{
	__le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);

	/* enable L4 checksum for TSO and TX checksum offload */
	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);

	/* enable IPv4 checksum for TSO */
	if (tx_flags & IXGBE_TX_FLAGS_IPV4)
		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);

	/* use index 1 context for TSO/FSO/FCOE */
	if (tx_flags & IXGBE_TX_FLAGS_TSO)
		olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);

	/* Check Context must be set if Tx switch is enabled, which it
	 * always is for case where virtual functions are running
	 */
	olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);

	tx_desc->read.olinfo_status = olinfo_status;
}

static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
			   struct ixgbevf_tx_buffer *first,
			   const u8 hdr_len)
{
	dma_addr_t dma;
	struct sk_buff *skb = first->skb;
	struct ixgbevf_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	unsigned int paylen = skb->len - hdr_len;
	u32 tx_flags = first->tx_flags;
	__le32 cmd_type;
	u16 i = tx_ring->next_to_use;

	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);

	ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
	cmd_type = ixgbevf_tx_cmd_type(tx_flags);

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_ring->dev, dma))
		goto dma_error;

	/* record length, and DMA address */
	dma_unmap_len_set(first, len, size);
	dma_unmap_addr_set(first, dma, dma);

	tx_desc->read.buffer_addr = cpu_to_le64(dma);

	for (;;) {
		while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += IXGBE_MAX_DATA_PER_TXD;
			size -= IXGBE_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
			tx_desc->read.olinfo_status = 0;
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		tx_buffer = &tx_ring->tx_buffer_info[i];
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);
		tx_desc->read.olinfo_status = 0;

		frag++;
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
	tx_desc->read.cmd_type_len = cmd_type;

	/* set the timestamp */
	first->time_stamp = jiffies;

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier (wmb) to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* notify HW of packet */
	ixgbevf_write_tail(tx_ring, i);

	return;
dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer_info map */
	for (;;) {
		tx_buffer = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
		if (tx_buffer == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}
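
/* A single DMA mapping larger than IXGBE_MAX_DATA_PER_TXD is carved into
 * several data descriptors by the inner while loop above.  A sketch of
 * the per-buffer descriptor count (this mirrors what TXD_USE_COUNT
 * computes for ixgbevf_xmit_frame(); the helper name is hypothetical):
 *
 *	static unsigned int descs_for(unsigned int size, unsigned int max)
 *	{
 *		return DIV_ROUND_UP(size, max);	// e.g. size 9000, max 4096 -> 3
 *	}
 */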

static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(ixgbevf_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;

	return 0;
}

static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	if (likely(ixgbevf_desc_unused(tx_ring) >= size))
		return 0;
	return __ixgbevf_maybe_stop_tx(tx_ring, size);
}
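
/* Classic stop-queue race avoidance: stop first, issue a full barrier,
 * then re-check the free descriptor count; if the cleanup path freed
 * room in the meantime, the queue is immediately restarted.  The pattern
 * in miniature, with hypothetical ring/netdev variables:
 *
 *	netif_stop_subqueue(netdev, qid);
 *	smp_mb();			// order the stop vs. the re-read
 *	if (free_descs(ring) >= needed)
 *		netif_start_subqueue(netdev, qid);	// reprieve
 *	else
 *		return -EBUSY;				// genuinely full
 */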

static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_tx_buffer *first;
	struct ixgbevf_ring *tx_ring;
	int tso;
	u32 tx_flags = 0;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	unsigned short f;
#endif
	u8 hdr_len = 0;
	u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);

	if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	tx_ring = adapter->tx_ring[skb->queue_mapping];

	/*
	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
#else
	count += skb_shinfo(skb)->nr_frags;
#endif
	if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = vlan_get_protocol(skb);

	tso = ixgbevf_tso(tx_ring, first, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		ixgbevf_tx_csum(tx_ring, first);

	ixgbevf_tx_map(tx_ring, first, hdr_len);

	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}

/**
 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	spin_lock_bh(&adapter->mbx_lock);

	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	return 0;
}

/**
 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;

	switch (adapter->hw.api_version) {
	case ixgbe_mbox_api_11:
		max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
		break;
	default:
		if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
			max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
		break;
	}

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > max_possible_frame))
		return -EINVAL;

	hw_dbg(hw, "changing MTU from %d to %d\n",
	       netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	/* notify the PF of our intent to use this size of frame */
	ixgbevf_rlpml_set_vf(hw, max_frame);

	return 0;
}
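
/* max_frame is the on-wire size the PF must accept: the MTU plus the
 * 14-byte Ethernet header and the 4-byte FCS.  For example, a requested
 * MTU of 1500 gives:
 *
 *	max_frame = 1500 + ETH_HLEN + ETH_FCS_LEN;	// 1500 + 14 + 4 = 1518
 */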

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ixgbevf_netpoll(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	int i;

	/* if interface is down do nothing */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return;
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_msix_clean_rings(0, adapter->q_vector[i]);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		ixgbevf_down(adapter);
		ixgbevf_free_irq(adapter);
		ixgbevf_free_all_tx_resources(adapter);
		ixgbevf_free_all_rx_resources(adapter);
		rtnl_unlock();
	}

	ixgbevf_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;

#endif
	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
		pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int ixgbevf_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_restore_state(pdev);
	/*
	 * pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DISABLED, &adapter->state);
	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	rtnl_lock();
	err = ixgbevf_init_interrupt_scheme(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&pdev->dev, "Cannot initialize interrupts\n");
		return err;
	}

	if (netif_running(netdev)) {
		err = ixgbevf_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return err;
}

#endif /* CONFIG_PM */
static void ixgbevf_shutdown(struct pci_dev *pdev)
{
	ixgbevf_suspend(pdev, PMSG_SUSPEND);
}

static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	unsigned int start;
	u64 bytes, packets;
	const struct ixgbevf_ring *ring;
	int i;

	ixgbevf_update_stats(adapter);

	stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = adapter->rx_ring[i];
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			bytes = ring->stats.bytes;
			packets = ring->stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		stats->rx_bytes += bytes;
		stats->rx_packets += packets;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			bytes = ring->stats.bytes;
			packets = ring->stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		stats->tx_bytes += bytes;
		stats->tx_packets += packets;
	}

	return stats;
}
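
/* Each per-ring byte/packet pair is read under a u64_stats seqcount: the
 * do/while retries whenever a writer bumped the sequence mid-read, so the
 * two values always form a consistent snapshot on 32-bit hosts.  The same
 * pattern for one hypothetical ring:
 *
 *	unsigned int start;
 *	u64 bytes, packets;
 *
 *	do {
 *		start = u64_stats_fetch_begin_irq(&ring->syncp);
 *		bytes = ring->stats.bytes;
 *		packets = ring->stats.packets;
 *	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 */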

static const struct net_device_ops ixgbevf_netdev_ops = {
	.ndo_open		= ixgbevf_open,
	.ndo_stop		= ixgbevf_close,
	.ndo_start_xmit		= ixgbevf_xmit_frame,
	.ndo_set_rx_mode	= ixgbevf_set_rx_mode,
	.ndo_get_stats64	= ixgbevf_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbevf_set_mac,
	.ndo_change_mtu		= ixgbevf_change_mtu,
	.ndo_tx_timeout		= ixgbevf_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgbevf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbevf_vlan_rx_kill_vid,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= ixgbevf_busy_poll_recv,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ixgbevf_netpoll,
#endif
};

static void ixgbevf_assign_netdev_ops(struct net_device *dev)
{
	dev->netdev_ops = &ixgbevf_netdev_ops;
	ixgbevf_set_ethtool_ops(dev);
	dev->watchdog_timeo = 5 * HZ;
}

/**
 * ixgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbevf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbevf_adapter *adapter = NULL;
	struct ixgbe_hw *hw = NULL;
	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
	int err, pci_using_dac;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No usable DMA "
				"configuration, aborting\n");
			goto err_dma;
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, ixgbevf_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
				   MAX_TX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/*
	 * call save state here in standalone driver because it relies on
	 * adapter struct to exist, and needs to call netdev_priv
	 */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	adapter->io_addr = hw->hw_addr;
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	ixgbevf_assign_netdev_ops(netdev);

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type  = ii->mac;

	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
	       sizeof(struct ixgbe_mbx_operations));

	/* setup the private structure */
	err = ixgbevf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* The HW MAC address was set and/or determined in sw_init */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		pr_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM;

	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_CTAG_RX |
			   NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = ixgbevf_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	if (IXGBE_REMOVED(hw->hw_addr)) {
		err = -EIO;
		goto err_sw_init;
	}
	INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
	INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
	set_bit(__IXGBEVF_WORK_INIT, &adapter->state);

	err = ixgbevf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	strcpy(netdev->name, "eth%d");

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	netif_carrier_off(netdev);

	ixgbevf_init_last_counter_stats(adapter);

	/* print the VF info */
	dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
	dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);

	switch (hw->mac.type) {
	case ixgbe_mac_X550_vf:
		dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n");
		break;
	case ixgbe_mac_X540_vf:
		dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
		break;
	case ixgbe_mac_82599_vf:
	default:
		dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
		break;
	}

	return 0;

err_register:
	ixgbevf_clear_interrupt_scheme(adapter);
err_sw_init:
	ixgbevf_reset_interrupt_capability(adapter);
	iounmap(adapter->io_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	if (adapter && !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
		pci_disable_device(pdev);
	return err;
}

/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBEVF_REMOVING, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_reset_interrupt_capability(adapter);

	iounmap(adapter->io_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");

	free_netdev(netdev);

	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
		pci_disable_device(pdev);
}

/**
 * ixgbevf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
		return PCI_ERS_RESULT_DISCONNECT;

	rtnl_lock();
	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		ixgbevf_down(adapter);

	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
		pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.  Implementation
 * resembles the first-half of the ixgbevf_resume routine.
 **/
static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DISABLED, &adapter->state);
	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * ixgbevf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.  Implementation resembles the
 * second-half of the ixgbevf_resume routine.
 **/
static void ixgbevf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbevf_up(adapter);

	netif_device_attach(netdev);
}

/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers ixgbevf_err_handler = {
	.error_detected = ixgbevf_io_error_detected,
	.slot_reset = ixgbevf_io_slot_reset,
	.resume = ixgbevf_io_resume,
};

static struct pci_driver ixgbevf_driver = {
	.name     = ixgbevf_driver_name,
	.id_table = ixgbevf_pci_tbl,
	.probe    = ixgbevf_probe,
	.remove   = ixgbevf_remove,
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = ixgbevf_suspend,
	.resume   = ixgbevf_resume,
#endif
	.shutdown = ixgbevf_shutdown,
	.err_handler = &ixgbevf_err_handler
};

/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init ixgbevf_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", ixgbevf_driver_string,
		ixgbevf_driver_version);

	pr_info("%s\n", ixgbevf_copyright);

	ret = pci_register_driver(&ixgbevf_driver);
	return ret;
}

module_init(ixgbevf_init_module);

/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbevf_exit_module(void)
{
	pci_unregister_driver(&ixgbevf_driver);
}

#ifdef DEBUG
/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

#endif
module_exit(ixgbevf_exit_module);

/* ixgbevf_main.c */