/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "ixgbevf.h"
char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 82599 Virtual Function";

#define DRV_VERSION "1.0.0-k0"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] = "Copyright (c) 2009 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf] = &ixgbevf_vf_info,
};
/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3
/* forward decls */
static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
			       u32 itr_reg);

static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
					   struct ixgbevf_ring *rx_ring,
					   u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
}
/**
 * ixgbevf_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 **/
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* tx or rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}
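/*
 * Illustrative note (not from the original source): each 32-bit VTIVAR
 * register packs the Rx and Tx causes of a queue pair as four 8-bit
 * entries.  For example, mapping Rx queue 3 to MSI-X vector 2 with
 * ixgbevf_set_ivar(adapter, 0, 3, 2) computes
 * index = 16 * (3 & 1) + 8 * 0 = 16 and programs bits 23:16 of
 * VTIVAR(3 >> 1) = VTIVAR(1) with (2 | IXGBE_IVAR_ALLOC_VAL).
 */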
static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
					       struct ixgbevf_tx_buffer
					       *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		if (tx_buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev,
				       tx_buffer_info->dma,
				       tx_buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev,
					 tx_buffer_info->dma,
					 tx_buffer_info->length,
					 DMA_TO_DEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}
static inline bool ixgbevf_check_tx_hang(struct ixgbevf_adapter *adapter,
					 struct ixgbevf_ring *tx_ring,
					 unsigned int eop)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 head, tail;

	/* Detect a transmit hang in hardware, this serializes the
	 * check with the clearing of time_stamp and movement of eop */
	head = readl(hw->hw_addr + tx_ring->head);
	tail = readl(hw->hw_addr + tx_ring->tail);
	adapter->detect_tx_hung = false;
	if ((head != tail) &&
	    tx_ring->tx_buffer_info[eop].time_stamp &&
	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ)) {
		/* detected Tx unit hang */
		union ixgbe_adv_tx_desc *tx_desc;
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
		printk(KERN_ERR "Detected Tx Unit Hang\n"
		       "  Tx Queue             <%d>\n"
		       "  TDH, TDT             <%x>, <%x>\n"
		       "  next_to_use          <%x>\n"
		       "  next_to_clean        <%x>\n"
		       "tx_buffer_info[next_to_clean]\n"
		       "  time_stamp           <%lx>\n"
		       "  jiffies              <%lx>\n",
		       tx_ring->queue_index,
		       head, tail,
		       tx_ring->next_to_use, eop,
		       tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
		return true;
	}

	return false;
}
#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#ifdef MAX_SKB_FRAGS
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1)	/* for context */
#else
#define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD)
#endif
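/*
 * Worked example (illustrative, not from the original source): with
 * IXGBE_MAX_TXD_PWR = 14 each descriptor carries at most 16384 bytes,
 * so TXD_USE_COUNT(4096) = (4096 >> 14) + 1 = 1 descriptor, while
 * TXD_USE_COUNT(60000) = (60000 >> 14) + 1 = 4 descriptors (three full
 * 16 KB chunks plus the 10848-byte remainder).
 */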
static void ixgbevf_tx_timeout(struct net_device *netdev);
/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
				 struct ixgbevf_ring *tx_ring)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int i, eop, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
	       (count < tx_ring->work_limit)) {
		bool cleaned = false;
		rmb(); /* read buffer_info after eop_desc */
		for ( ; !cleaned; count++) {
			struct sk_buff *skb;
			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			cleaned = (i == eop);
			skb = tx_buffer_info->skb;

			if (cleaned && skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbevf_unmap_and_free_tx_resource(adapter,
							   tx_buffer_info);

			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(netdev) &&
		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
#ifdef HAVE_TX_MQ
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			++adapter->restart_queue;
		}
#else
		if (netif_queue_stopped(netdev) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
#endif
	}

	if (adapter->detect_tx_hung) {
		if (ixgbevf_check_tx_hang(adapter, tx_ring, i)) {
			/* schedule immediate reset if we believe we hung */
			printk(KERN_INFO
			       "tx hang %d detected, resetting adapter\n",
			       adapter->tx_timeout_count + 1);
			ixgbevf_tx_timeout(adapter->netdev);
		}
	}

	/* re-arm the interrupt */
	if ((count >= tx_ring->work_limit) &&
	    (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
		IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx);
	}

	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;

	adapter->net_stats.tx_bytes += total_bytes;
	adapter->net_stats.tx_packets += total_packets;

	return (count < tx_ring->work_limit);
}
/**
 * ixgbevf_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
				struct sk_buff *skb, u8 status,
				struct ixgbevf_ring *ring,
				union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
	int ret;

	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
		if (adapter->vlgrp && is_vlan)
			vlan_gro_receive(&q_vector->napi,
					 adapter->vlgrp,
					 tag, skb);
		else
			napi_gro_receive(&q_vector->napi, skb);
	} else {
		if (adapter->vlgrp && is_vlan)
			ret = vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
		else
			ret = netif_rx(skb);
	}
}
/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
				       u32 status_err, struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_NONE;

	/* Rx csum disabled */
	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_rx_good++;
}
/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *rx_ring,
				     int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

		if (!bi->page_dma &&
		    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
			if (!bi->page) {
				bi->page = netdev_alloc_page(adapter->netdev);
				if (!bi->page) {
					adapter->alloc_rx_page_failed++;
					goto no_buffers;
				}
				bi->page_offset = 0;
			} else {
				/* use a half page if we're re-using */
				bi->page_offset ^= (PAGE_SIZE / 2);
			}

			bi->page_dma = dma_map_page(&pdev->dev, bi->page,
						    bi->page_offset,
						    (PAGE_SIZE / 2),
						    DMA_FROM_DEVICE);
		}

		skb = bi->skb;
		if (!skb) {
			skb = netdev_alloc_skb(adapter->netdev,
					       bufsz);

			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			/*
			 * Make buffer alignment 2 beyond a 16 byte boundary
			 * this will result in a 16 byte aligned IP header after
			 * the 14 byte MAC header is removed
			 */
			skb_reserve(skb, NET_IP_ALIGN);

			bi->skb = skb;
		}
		if (!bi->dma) {
			bi->dma = dma_map_single(&pdev->dev, skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i-- == 0)
			i = (rx_ring->count - 1);

		ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
	}
}
static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u64 qmask)
{
	u32 mask;
	struct ixgbe_hw *hw = &adapter->hw;

	mask = (qmask & 0xFFFFFFFF);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
}
static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
}

static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}
static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *rx_ring,
				 int *work_done, int work_to_do)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 len, staterr;
	u16 hdr_info;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		u32 upper_len = 0;
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		rmb(); /* read descriptor and rx_buffer_info after status DD */
		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc));
			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
			if (hdr_info & IXGBE_RXDADV_SPH)
				adapter->rx_hdr_split++;
			if (len > IXGBEVF_RX_HDR_SIZE)
				len = IXGBEVF_RX_HDR_SIZE;
			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
		} else {
			len = le16_to_cpu(rx_desc->wb.upper.length);
		}
		cleaned = true;
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		if (upper_len) {
			dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
			rx_buffer_info->page_dma = 0;
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buffer_info->page,
					   rx_buffer_info->page_offset,
					   upper_len);

			if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
			    (page_count(rx_buffer_info->page) != 1))
				rx_buffer_info->page = NULL;
			else
				get_page(rx_buffer_info->page);

			skb->len += upper_len;
			skb->data_len += upper_len;
			skb->truesize += upper_len;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		next_buffer = &rx_ring->rx_buffer_info[i];

		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
			if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
				rx_buffer_info->skb = next_buffer->skb;
				rx_buffer_info->dma = next_buffer->dma;
				next_buffer->skb = skb;
				next_buffer->dma = 0;
			} else {
				skb->next = next_buffer->skb;
				skb->next->prev = skb;
			}
			adapter->non_eop_descs++;
			goto next_desc;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_checksum(adapter, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/*
		 * Work around issue of some types of VM to VM loop back
		 * packets not getting split correctly
		 */
		if (staterr & IXGBE_RXD_STAT_LB) {
			u32 header_fixup_len = skb_headlen(skb);
			if (header_fixup_len < 14)
				skb_push(skb, header_fixup_len);
		}
		skb->protocol = eth_type_trans(skb, adapter->netdev);

		ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;

	return cleaned;
}
/**
 * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function is optimized for cleaning one queue only on a single
 * q_vector!!!
 **/
static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *rx_ring = NULL;
	int work_done = 0;
	long r_idx;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);

	ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);

	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->itr_setting & 1)
			ixgbevf_set_itr_msix(q_vector);
		if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
			ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx);
	}

	return work_done;
}
/**
 * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean more than one rx queue associated with a
 * q_vector.
 **/
static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *rx_ring = NULL;
	int work_done = 0, i;
	long r_idx;
	u64 enable_mask = 0;

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	budget /= (q_vector->rxr_count ?: 1);
	budget = max(budget, 1);
	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
		enable_mask |= rx_ring->v_idx;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

#ifndef HAVE_NETDEV_NAPI_LIST
	if (!netif_running(adapter->netdev))
		work_done = 0;

#endif
	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);

	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->itr_setting & 1)
			ixgbevf_set_itr_msix(q_vector);
		if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
			ixgbevf_irq_enable_queues(adapter, enable_mask);
	}

	return work_done;
}
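/*
 * Illustrative note (not from the original source): with a NAPI budget
 * of 64 and three Rx rings sharing one vector, budget /= 3 gives each
 * ring a fair share of 21 packets per poll.  The max(budget, 1) clamp
 * enforces the rule stated above: the per-ring budget must never reach
 * 0, since each ring's cleanup exits as soon as work_done >= budget.
 */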
/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j, q_vectors, v_idx, r_idx;
	u32 mask;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = adapter->q_vector[v_idx];
		r_idx = find_first_bit(q_vector->rxr_idx,
				       adapter->num_rx_queues);

		for (i = 0; i < q_vector->rxr_count; i++) {
			j = adapter->rx_ring[r_idx].reg_idx;
			ixgbevf_set_ivar(adapter, 0, j, v_idx);
			r_idx = find_next_bit(q_vector->rxr_idx,
					      adapter->num_rx_queues,
					      r_idx + 1);
		}
		r_idx = find_first_bit(q_vector->txr_idx,
				       adapter->num_tx_queues);

		for (i = 0; i < q_vector->txr_count; i++) {
			j = adapter->tx_ring[r_idx].reg_idx;
			ixgbevf_set_ivar(adapter, 1, j, v_idx);
			r_idx = find_next_bit(q_vector->txr_idx,
					      adapter->num_tx_queues,
					      r_idx + 1);
		}

		/* if this is a tx only vector halve the interrupt rate */
		if (q_vector->txr_count && !q_vector->rxr_count)
			q_vector->eitr = (adapter->eitr_param >> 1);
		else if (q_vector->rxr_count)
			/* rx only or mixed */
			q_vector->eitr = adapter->eitr_param;

		ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr);
	}

	ixgbevf_set_ivar(adapter, -1, 1, v_idx);

	/* set up to autoclear timer, and the vectors */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~IXGBE_EIMS_OTHER;
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
}
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @eitr: eitr setting (ints per sec) to give last timeslice
 * @itr_setting: current throttle rate in ints/second
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 **/
static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
			     u32 eitr, u8 itr_setting,
			     int packets, int bytes)
{
	unsigned int retval = itr_setting;
	u32 timepassed_us;
	u64 bytes_perint;

	if (packets == 0)
		goto update_itr_done;


	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = 1000000/eitr;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > adapter->eitr_low)
			retval = low_latency;
		break;
	case low_latency:
		if (bytes_perint > adapter->eitr_high)
			retval = bulk_latency;
		else if (bytes_perint <= adapter->eitr_low)
			retval = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= adapter->eitr_high)
			retval = low_latency;
		break;
	}

update_itr_done:
	return retval;
}
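/*
 * Worked example (illustrative, not from the original source): at
 * eitr = 8000 ints/s the last timeslice was 1000000/8000 = 125 us, so
 * 125000 bytes seen since the previous interrupt give bytes_perint =
 * 125000/125 = 1000 bytes/us (~8 Gb/s), which is then compared against
 * adapter->eitr_low and adapter->eitr_high to step the latency state
 * machine one tier at a time.
 */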
/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @adapter: pointer to adapter struct
 * @v_idx: vector index into q_vector array
 * @itr_reg: new value to be written in *register* format, not ints/s
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update VTEITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 **/
static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
			       u32 itr_reg)
{
	struct ixgbe_hw *hw = &adapter->hw;

	itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg);

	/*
	 * set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}
static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	u32 new_itr;
	u8 current_itr, ret_itr;
	int i, r_idx, v_idx = q_vector->v_idx;
	struct ixgbevf_ring *rx_ring, *tx_ring;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
		ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
					     q_vector->tx_itr,
					     tx_ring->total_packets,
					     tx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
				    q_vector->tx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
					     q_vector->rx_itr,
					     rx_ring->total_packets,
					     rx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
				    q_vector->rx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
	default:
		new_itr = 8000;
		break;
	}

	if (new_itr != q_vector->eitr) {
		u32 itr_reg;

		/* save the algorithm value here, not the smoothed one */
		q_vector->eitr = new_itr;
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
		itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
		ixgbevf_write_eitr(adapter, v_idx, itr_reg);
	}
}
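/*
 * Illustrative note (an assumption, not from the original source): if
 * EITR_INTS_PER_SEC_TO_REG follows the usual ixgbe definition of
 * 1000000000 / (rate * 256), the low_latency rate of 20000 ints/s maps
 * to 1000000000 / (20000 * 256) = 195 interval units of 256 ns each,
 * i.e. roughly one interrupt every 50 us.
 */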
static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr;
	u32 msg;

	eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);

	if (!hw->mbx.ops.check_for_ack(hw)) {
		/*
		 * checking for the ack clears the PFACK bit.  Place
		 * it back in the v2p_mailbox cache so that anyone
		 * polling for an ack will not miss it.  Also
		 * avoid the read below because the code to read
		 * the mailbox will also clear the ack bit.  This was
		 * causing lost acks.  Just cache the bit and exit
		 * the IRQ handler.
		 */
		hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
		goto out;
	}

	/* Not an ack interrupt, go ahead and read the message */
	hw->mbx.ops.read(hw, &msg, 1);

	if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 1));

out:
	return IRQ_HANDLED;
}
static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;
	struct ixgbevf_adapter  *adapter = q_vector->adapter;
	struct ixgbevf_ring     *tx_ring;
	int i, r_idx;

	if (!q_vector->txr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
		tx_ring->total_bytes = 0;
		tx_ring->total_packets = 0;
		ixgbevf_clean_tx_irq(adapter, tx_ring);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	if (adapter->itr_setting & 1)
		ixgbevf_set_itr_msix(q_vector);

	return IRQ_HANDLED;
}
/**
 * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;
	struct ixgbevf_adapter  *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_ring  *rx_ring;
	int r_idx;
	int i;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		rx_ring->total_bytes = 0;
		rx_ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

	if (!q_vector->rxr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);
	/* disable interrupts on this vector only */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx);
	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data)
{
	ixgbevf_msix_clean_rx(irq, data);
	ixgbevf_msix_clean_tx(irq, data);

	return IRQ_HANDLED;
}
static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	set_bit(r_idx, q_vector->rxr_idx);
	q_vector->rxr_count++;
	a->rx_ring[r_idx].v_idx = 1 << v_idx;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	set_bit(t_idx, q_vector->txr_idx);
	q_vector->txr_count++;
	a->tx_ring[t_idx].v_idx = 1 << v_idx;
}
/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}
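/*
 * Worked example (illustrative, not from the original source): with 5
 * Rx queues and 2 q_vectors, the grouping loop assigns
 * rqpv = DIV_ROUND_UP(5, 2) = 3 queues to vector 0, leaving
 * rxr_remaining = 2, then rqpv = DIV_ROUND_UP(2, 1) = 2 queues to
 * vector 1, so the remainder is spread without leaving any queue
 * unmapped.
 */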
/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irqreturn_t (*handler)(int, void *);
	int i, vector, q_vectors, err;
	int ri = 0, ti = 0;

	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count)          \
					  ? &ixgbevf_msix_clean_many : \
			  (_v)->rxr_count ? &ixgbevf_msix_clean_rx   : \
			  (_v)->txr_count ? &ixgbevf_msix_clean_tx   : \
			  NULL)
	for (vector = 0; vector < q_vectors; vector++) {
		handler = SET_HANDLER(adapter->q_vector[vector]);

		if (handler == &ixgbevf_msix_clean_rx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "rx", ri++);
		} else if (handler == &ixgbevf_msix_clean_tx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "tx", ti++);
		} else if (handler == &ixgbevf_msix_clean_many) {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "TxRx", vector);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(adapter->msix_entries[vector].vector,
				  handler, 0, adapter->name[vector],
				  adapter->q_vector[vector]);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt "
			       "Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	sprintf(adapter->name[vector], "%s:mbx", netdev->name);
	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev);
	if (err) {
		hw_dbg(&adapter->hw,
		       "request_irq for msix_mbx failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	for (i = vector - 1; i >= 0; i--)
		free_irq(adapter->msix_entries[--vector].vector,
			 &(adapter->q_vector[i]));
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
	return err;
}
static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
		bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
		bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
		q_vector->rxr_count = 0;
		q_vector->txr_count = 0;
		q_vector->eitr = adapter->eitr_param;
	}
}
/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = 0;

	err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw,
		       "request_irq failed, Error %d\n", err);

	return err;
}
static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i, q_vectors;

	q_vectors = adapter->num_msix_vectors;

	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, netdev);
	i--;

	for (; i >= 0; i--) {
		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}

	ixgbevf_reset_q_vectors(adapter);
}
/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	int i;
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);

	IXGBE_WRITE_FLUSH(hw);

	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}
/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter,
				      bool queues, bool flush)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 mask;
	u64 qmask;

	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
	qmask = ~0;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

	if (queues)
		ixgbevf_irq_enable_queues(adapter, qmask);

	if (flush)
		IXGBE_WRITE_FLUSH(hw);
}
/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u64 tdba;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, j, tdlen, txctrl;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->tx_ring[i];
		j = ring->reg_idx;
		tdba = ring->dma;
		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
				(tdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
		adapter->tx_ring[i].head = IXGBE_VFTDH(j);
		adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
		/* Disable Tx Head Writeback RO bit, since this hoses
		 * bookkeeping if things aren't delivered in order.
		 */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
	}
}
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbevf_ring *rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	rx_ring = &adapter->rx_ring[index];

	srrctl = IXGBE_SRRCTL_DROP_EN;

	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		u16 bufsz = IXGBEVF_RXBUFFER_2048;
		/* grow the amount we can receive on large page machines */
		if (bufsz < (PAGE_SIZE / 2))
			bufsz = (PAGE_SIZE / 2);
		/* cap the bufsz at our largest descriptor size */
		bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz);

		srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
		srrctl |= ((IXGBEVF_RX_HDR_SIZE <<
			   IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
			   IXGBE_SRRCTL_BSIZEHDR_MASK);
	} else {
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
			srrctl |= IXGBEVF_RXBUFFER_2048 >>
				IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		else
			srrctl |= rx_ring->rx_buf_len >>
				IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}
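/*
 * Illustrative note (an assumption, not from the original source):
 * SRRCTL.BSIZEPKT is encoded in 1 KB units, so with the conventional
 * IXGBE_SRRCTL_BSIZEPKT_SHIFT of 10 a 2048-byte buffer becomes
 * 2048 >> 10 = 2 in the low bits of the register, while the header
 * buffer size is shifted into its own field by
 * IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT and masked to fit.
 */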
/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	u64 rdba;
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i, j;
	u32 rdlen;
	int rx_buf_len;

	/* Decide whether to use packet split mode or not */
	if (netdev->mtu > ETH_DATA_LEN) {
		if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE)
			adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
		else
			adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
	} else {
		if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE)
			adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
		else
			adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
	}

	/* Set the RX buffer length according to the mode */
	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		/* PSRTYPE must be initialized in 82599 */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
			IXGBE_PSRTYPE_UDPHDR |
			IXGBE_PSRTYPE_IPV4HDR |
			IXGBE_PSRTYPE_IPV6HDR |
			IXGBE_PSRTYPE_L2HDR;
		IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
		rx_buf_len = IXGBEVF_RX_HDR_SIZE;
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
		if (netdev->mtu <= ETH_DATA_LEN)
			rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		else
			rx_buf_len = ALIGN(max_frame, 1024);
	}

	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rdba = adapter->rx_ring[i].dma;
		j = adapter->rx_ring[i].reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
				(rdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
		adapter->rx_ring[i].head = IXGBE_VFRDH(j);
		adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
		adapter->rx_ring[i].rx_buf_len = rx_buf_len;

		ixgbevf_configure_srrctl(adapter, j);
	}
}
static void ixgbevf_vlan_rx_register(struct net_device *netdev,
				     struct vlan_group *grp)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j;
	u32 ctrl;

	adapter->vlgrp = grp;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		j = adapter->rx_ring[i].reg_idx;
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		ctrl |= IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), ctrl);
	}
}
static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* add VID to filter table */
	if (hw->mac.ops.set_vfta)
		hw->mac.ops.set_vfta(hw, vid, 0, true);
}
static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		ixgbevf_irq_disable(adapter);

	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		ixgbevf_irq_enable(adapter, true, true);

	/* remove VID from filter table */
	if (hw->mac.ops.set_vfta)
		hw->mac.ops.set_vfta(hw, vid, 0, false);
}
static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
	ixgbevf_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}
/**
 * ixgbevf_set_rx_mode - Multicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast mode.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* reprogram multicast list */
	if (hw->mac.ops.update_mc_addr_list)
		hw->mac.ops.update_mc_addr_list(hw, netdev);
}
static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct napi_struct *napi;
		q_vector = adapter->q_vector[q_idx];
		if (!q_vector->rxr_count)
			continue;
		napi = &q_vector->napi;
		if (q_vector->rxr_count > 1)
			napi->poll = &ixgbevf_clean_rxonly_many;

		napi_enable(napi);
	}
}
static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		if (!q_vector->rxr_count)
			continue;
		napi_disable(&q_vector->napi);
	}
}
static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	ixgbevf_set_rx_mode(netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
		ixgbevf_alloc_rx_buffers(adapter, ring, ring->count);
		ring->next_to_use = ring->count - 1;
		writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail);
	}
}
#define IXGBE_MAX_RX_DESC_POLL 10
static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
						int rxr)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int j = adapter->rx_ring[rxr].reg_idx;
	int k;

	for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
		if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
			break;
		else
			msleep(1);
	}
	if (k >= IXGBE_MAX_RX_DESC_POLL) {
		hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
		       "not set within the polling period\n", rxr);
	}

	ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
				(adapter->rx_ring[rxr].count - 1));
}
static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
	/* Only save pre-reset stats if there are some */
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
			adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
			adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
			adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
			adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
			adapter->stats.base_vfmprc;
	}
}
static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}
static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j = 0;
	int num_rx_rings = adapter->num_rx_queues;
	u32 txdctl, rxdctl;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < num_rx_rings; i++) {
		j = adapter->rx_ring[i].reg_idx;
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		ixgbevf_rx_desc_queue_enable(adapter, i);
	}

	ixgbevf_configure_msix(adapter);

	if (hw->mac.ops.set_rar) {
		if (is_valid_ether_addr(hw->mac.addr))
			hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
		else
			hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
	}

	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	ixgbevf_save_reset_stats(adapter);
	ixgbevf_init_last_counter_stats(adapter);

	/* bring the link up in the watchdog, this could race with our first
	 * link up interrupt but shouldn't be a problem */
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	mod_timer(&adapter->watchdog_timer, jiffies);
	return 0;
}
int ixgbevf_up(struct ixgbevf_adapter *adapter)
{
	int err;
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_configure(adapter);

	err = ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	ixgbevf_irq_enable(adapter, true, true);

	return err;
}
/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbevf_rx_buffer *rx_buffer_info;

		rx_buffer_info = &rx_ring->rx_buffer_info[i];
		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
		}
		if (rx_buffer_info->skb) {
			struct sk_buff *skb = rx_buffer_info->skb;
			rx_buffer_info->skb = NULL;
			do {
				struct sk_buff *this = skb;
				skb = skb->prev;
				dev_kfree_skb(this);
			} while (skb);
		}
		if (!rx_buffer_info->page)
			continue;
		dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
			       PAGE_SIZE / 2, DMA_FROM_DEVICE);
		rx_buffer_info->page_dma = 0;
		put_page(rx_buffer_info->page);
		rx_buffer_info->page = NULL;
		rx_buffer_info->page_offset = 0;
	}

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	if (rx_ring->head)
		writel(0, adapter->hw.hw_addr + rx_ring->head);
	if (rx_ring->tail)
		writel(0, adapter->hw.hw_addr + rx_ring->tail);
}
/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
	}

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (tx_ring->head)
		writel(0, adapter->hw.hw_addr + tx_ring->head);
	if (tx_ring->tail)
		writel(0, adapter->hw.hw_addr + tx_ring->tail);
}
/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}
/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}
void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txdctl;
	int i, j;

	/* signal that we are down to the interrupt handler */
	set_bit(__IXGBEVF_DOWN, &adapter->state);
	/* disable receives */

	netif_tx_disable(netdev);

	msleep(10);

	netif_tx_stop_all_queues(netdev);

	ixgbevf_irq_disable(adapter);

	ixgbevf_napi_disable_all(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	/* can't call flush scheduled work here because it can deadlock
	 * if linkwatch_event tries to acquire the rtnl_lock which we are
	 * holding */
	while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
		msleep(1);

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
				(txdctl & ~IXGBE_TXDCTL_ENABLE));
	}

	netif_carrier_off(netdev);

	if (!pci_channel_offline(adapter->pdev))
		ixgbevf_reset(adapter);

	ixgbevf_clean_all_tx_rings(adapter);
	ixgbevf_clean_all_rx_rings(adapter);
}
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	WARN_ON(in_interrupt());

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		msleep(1);

	/*
	 * Check if PF is up before re-init.  If not then skip until
	 * later when the PF is up and ready to service requests from
	 * the VF via mailbox.  If the VF is up and running then the
	 * watchdog task will continue to schedule reset tasks until
	 * the PF is up and running.
	 */
	if (!hw->mac.ops.reset_hw(hw)) {
		ixgbevf_down(adapter);
		ixgbevf_up(adapter);
	}

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}
void ixgbevf_reset(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	if (hw->mac.ops.reset_hw(hw))
		hw_dbg(hw, "PF still resetting\n");
	else
		hw->mac.ops.init_hw(hw);

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}
}
static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
					 int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 * 3) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				      vectors);
		if (!err) /* Success in acquiring all requested vectors. */
			break;
		else if (err < 0)
			vectors = 0; /* Nasty failure, quit now */
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold) {
		/* Can't allocate enough MSI-X interrupts?  Oh well.
		 * This just means we'll go with either a single MSI
		 * vector or fall back to legacy interrupts.
		 */
		hw_dbg(&adapter->hw,
		       "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		adapter->num_msix_vectors = vectors;
	}
}
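/*
 * Illustrative note (not from the original source): pci_enable_msix()
 * returns 0 on success, a negative errno on hard failure, or a positive
 * count of vectors that could have been allocated.  The retry loop above
 * shrinks the request to that count, e.g. asking for 4 vectors on a
 * system that can only grant 3 retries once with vectors = 3 before
 * succeeding.
 */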
/*
 * ixgbevf_set_num_queues: Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_rx_pools = adapter->num_rx_queues;
	adapter->num_rx_queues_per_pool = 1;
}
/**
 * ixgbevf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err_tx_ring_allocation;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err_rx_ring_allocation;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].count = adapter->tx_ring_count;
		adapter->tx_ring[i].queue_index = i;
		adapter->tx_ring[i].reg_idx = i;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].count = adapter->rx_ring_count;
		adapter->rx_ring[i].queue_index = i;
		adapter->rx_ring[i].reg_idx = i;
	}

	return 0;

err_rx_ring_allocation:
	kfree(adapter->tx_ring);
err_tx_ring_allocation:
	return -ENOMEM;
}
/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	int err = 0;
	int vector, v_budget;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) twice the number of vectors as there are CPU's.
	 */
	v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
		       (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	ixgbevf_acquire_msix_vectors(adapter, v_budget);

out:
	return err;
}
2062 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
2063 * @adapter: board private structure to initialize
2065 * We allocate one q_vector per queue interrupt. If allocation fails we
2068 static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter
*adapter
)
2070 int q_idx
, num_q_vectors
;
2071 struct ixgbevf_q_vector
*q_vector
;
2073 int (*poll
)(struct napi_struct
*, int);
2075 num_q_vectors
= adapter
->num_msix_vectors
- NON_Q_VECTORS
;
2076 napi_vectors
= adapter
->num_rx_queues
;
2077 poll
= &ixgbevf_clean_rxonly
;
2079 for (q_idx
= 0; q_idx
< num_q_vectors
; q_idx
++) {
2080 q_vector
= kzalloc(sizeof(struct ixgbevf_q_vector
), GFP_KERNEL
);
2083 q_vector
->adapter
= adapter
;
2084 q_vector
->v_idx
= q_idx
;
2085 q_vector
->eitr
= adapter
->eitr_param
;
2086 if (q_idx
< napi_vectors
)
2087 netif_napi_add(adapter
->netdev
, &q_vector
->napi
,
2089 adapter
->q_vector
[q_idx
] = q_vector
;
2097 q_vector
= adapter
->q_vector
[q_idx
];
2098 netif_napi_del(&q_vector
->napi
);
2100 adapter
->q_vector
[q_idx
] = NULL
;
2106 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2107 * @adapter: board private structure to initialize
2109 * This function frees the memory allocated to the q_vectors. In addition if
2110 * NAPI is enabled it will delete any references to the NAPI struct prior
2111 * to freeing the q_vector.
2113 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter
*adapter
)
2115 int q_idx
, num_q_vectors
;
2118 num_q_vectors
= adapter
->num_msix_vectors
- NON_Q_VECTORS
;
2119 napi_vectors
= adapter
->num_rx_queues
;
2121 for (q_idx
= 0; q_idx
< num_q_vectors
; q_idx
++) {
2122 struct ixgbevf_q_vector
*q_vector
= adapter
->q_vector
[q_idx
];
2124 adapter
->q_vector
[q_idx
] = NULL
;
2125 if (q_idx
< napi_vectors
)
2126 netif_napi_del(&q_vector
->napi
);
2132 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2133 * @adapter: board private structure
2136 static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter
*adapter
)
2138 pci_disable_msix(adapter
->pdev
);
2139 kfree(adapter
->msix_entries
);
2140 adapter
->msix_entries
= NULL
;
2144 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
2145 * @adapter: board private structure to initialize
2148 static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter
*adapter
)
2152 /* Number of supported queues */
2153 ixgbevf_set_num_queues(adapter
);
2155 err
= ixgbevf_set_interrupt_capability(adapter
);
2157 hw_dbg(&adapter
->hw
,
2158 "Unable to setup interrupt capabilities\n");
2159 goto err_set_interrupt
;
2162 err
= ixgbevf_alloc_q_vectors(adapter
);
2164 hw_dbg(&adapter
->hw
, "Unable to allocate memory for queue "
2166 goto err_alloc_q_vectors
;
2169 err
= ixgbevf_alloc_queues(adapter
);
2171 printk(KERN_ERR
"Unable to allocate memory for queues\n");
2172 goto err_alloc_queues
;
2175 hw_dbg(&adapter
->hw
, "Multiqueue %s: Rx Queue count = %u, "
2176 "Tx Queue count = %u\n",
2177 (adapter
->num_rx_queues
> 1) ? "Enabled" :
2178 "Disabled", adapter
->num_rx_queues
, adapter
->num_tx_queues
);
2180 set_bit(__IXGBEVF_DOWN
, &adapter
->state
);
2184 ixgbevf_free_q_vectors(adapter
);
2185 err_alloc_q_vectors
:
2186 ixgbevf_reset_interrupt_capability(adapter
);
2192 * ixgbevf_sw_init - Initialize general software structures
2193 * (struct ixgbevf_adapter)
2194 * @adapter: board private structure to initialize
2196 * ixgbevf_sw_init initializes the Adapter private data structure.
2197 * Fields are initialized based on PCI device information and
2198 * OS network device settings (MTU size).
2200 static int __devinit
ixgbevf_sw_init(struct ixgbevf_adapter
*adapter
)
2202 struct ixgbe_hw
*hw
= &adapter
->hw
;
2203 struct pci_dev
*pdev
= adapter
->pdev
;
2206 /* PCI config space info */
2208 hw
->vendor_id
= pdev
->vendor
;
2209 hw
->device_id
= pdev
->device
;
2210 pci_read_config_byte(pdev
, PCI_REVISION_ID
, &hw
->revision_id
);
2211 hw
->subsystem_vendor_id
= pdev
->subsystem_vendor
;
2212 hw
->subsystem_device_id
= pdev
->subsystem_device
;
2214 hw
->mbx
.ops
.init_params(hw
);
2215 hw
->mac
.max_tx_queues
= MAX_TX_QUEUES
;
2216 hw
->mac
.max_rx_queues
= MAX_RX_QUEUES
;
2217 err
= hw
->mac
.ops
.reset_hw(hw
);
2219 dev_info(&pdev
->dev
,
2220 "PF still in reset state, assigning new address\n");
2221 dev_hw_addr_random(adapter
->netdev
, hw
->mac
.addr
);
2223 err
= hw
->mac
.ops
.init_hw(hw
);
2225 printk(KERN_ERR
"init_shared_code failed: %d\n", err
);
2230 /* Enable dynamic interrupt throttling rates */
2231 adapter
->eitr_param
= 20000;
2232 adapter
->itr_setting
= 1;
2234 /* set defaults for eitr in MegaBytes */
2235 adapter
->eitr_low
= 10;
2236 adapter
->eitr_high
= 20;
2238 /* set default ring sizes */
2239 adapter
->tx_ring_count
= IXGBEVF_DEFAULT_TXD
;
2240 adapter
->rx_ring_count
= IXGBEVF_DEFAULT_RXD
;
2242 /* enable rx csum by default */
2243 adapter
->flags
|= IXGBE_FLAG_RX_CSUM_ENABLED
;
2245 set_bit(__IXGBEVF_DOWN
, &adapter
->state
);
2251 #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
2253 u32 current_counter = IXGBE_READ_REG(hw, reg); \
2254 if (current_counter < last_counter) \
2255 counter += 0x100000000LL; \
2256 last_counter = current_counter; \
2257 counter &= 0xFFFFFFFF00000000LL; \
2258 counter |= current_counter; \
2261 #define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2263 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
2264 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
2265 u64 current_counter = (current_counter_msb << 32) | \
2266 current_counter_lsb; \
2267 if (current_counter < last_counter) \
2268 counter += 0x1000000000LL; \
2269 last_counter = current_counter; \
2270 counter &= 0xFFFFFFF000000000LL; \
2271 counter |= current_counter; \
2274 * ixgbevf_update_stats - Update the board statistics counters.
2275 * @adapter: board private structure
2277 void ixgbevf_update_stats(struct ixgbevf_adapter
*adapter
)
2279 struct ixgbe_hw
*hw
= &adapter
->hw
;
2281 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC
, adapter
->stats
.last_vfgprc
,
2282 adapter
->stats
.vfgprc
);
2283 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC
, adapter
->stats
.last_vfgptc
,
2284 adapter
->stats
.vfgptc
);
2285 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB
, IXGBE_VFGORC_MSB
,
2286 adapter
->stats
.last_vfgorc
,
2287 adapter
->stats
.vfgorc
);
2288 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB
, IXGBE_VFGOTC_MSB
,
2289 adapter
->stats
.last_vfgotc
,
2290 adapter
->stats
.vfgotc
);
2291 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC
, adapter
->stats
.last_vfmprc
,
2292 adapter
->stats
.vfmprc
);
2294 /* Fill out the OS statistics structure */
2295 adapter
->net_stats
.multicast
= adapter
->stats
.vfmprc
-
2296 adapter
->stats
.base_vfmprc
;
2300 * ixgbevf_watchdog - Timer Call-back
2301 * @data: pointer to adapter cast into an unsigned long
2303 static void ixgbevf_watchdog(unsigned long data
)
2305 struct ixgbevf_adapter
*adapter
= (struct ixgbevf_adapter
*)data
;
2306 struct ixgbe_hw
*hw
= &adapter
->hw
;
2311 * Do the watchdog outside of interrupt context due to the lovely
2312 * delays that some of the newer hardware requires
2315 if (test_bit(__IXGBEVF_DOWN
, &adapter
->state
))
2316 goto watchdog_short_circuit
;
2318 /* get one bit for every active tx/rx interrupt vector */
2319 for (i
= 0; i
< adapter
->num_msix_vectors
- NON_Q_VECTORS
; i
++) {
2320 struct ixgbevf_q_vector
*qv
= adapter
->q_vector
[i
];
2321 if (qv
->rxr_count
|| qv
->txr_count
)
2325 IXGBE_WRITE_REG(hw
, IXGBE_VTEICS
, (u32
)eics
);
2327 watchdog_short_circuit
:
2328 schedule_work(&adapter
->watchdog_task
);
2332 * ixgbevf_tx_timeout - Respond to a Tx Hang
2333 * @netdev: network interface device structure
2335 static void ixgbevf_tx_timeout(struct net_device
*netdev
)
2337 struct ixgbevf_adapter
*adapter
= netdev_priv(netdev
);
2339 /* Do the reset outside of interrupt context */
2340 schedule_work(&adapter
->reset_task
);
2343 static void ixgbevf_reset_task(struct work_struct
*work
)
2345 struct ixgbevf_adapter
*adapter
;
2346 adapter
= container_of(work
, struct ixgbevf_adapter
, reset_task
);
2348 /* If we're already down or resetting, just bail */
2349 if (test_bit(__IXGBEVF_DOWN
, &adapter
->state
) ||
2350 test_bit(__IXGBEVF_RESETTING
, &adapter
->state
))
2353 adapter
->tx_timeout_count
++;
2355 ixgbevf_reinit_locked(adapter
);
2359 * ixgbevf_watchdog_task - worker thread to bring link up
2360 * @work: pointer to work_struct containing our data
2362 static void ixgbevf_watchdog_task(struct work_struct
*work
)
2364 struct ixgbevf_adapter
*adapter
= container_of(work
,
2365 struct ixgbevf_adapter
,
2367 struct net_device
*netdev
= adapter
->netdev
;
2368 struct ixgbe_hw
*hw
= &adapter
->hw
;
2369 u32 link_speed
= adapter
->link_speed
;
2370 bool link_up
= adapter
->link_up
;
2372 adapter
->flags
|= IXGBE_FLAG_IN_WATCHDOG_TASK
;
2375 * Always check the link on the watchdog because we have
2378 if (hw
->mac
.ops
.check_link
) {
2379 if ((hw
->mac
.ops
.check_link(hw
, &link_speed
,
2380 &link_up
, false)) != 0) {
2381 adapter
->link_up
= link_up
;
2382 adapter
->link_speed
= link_speed
;
2383 netif_carrier_off(netdev
);
2384 netif_tx_stop_all_queues(netdev
);
2385 schedule_work(&adapter
->reset_task
);
2389 /* always assume link is up, if no check link
2391 link_speed
= IXGBE_LINK_SPEED_10GB_FULL
;
2394 adapter
->link_up
= link_up
;
2395 adapter
->link_speed
= link_speed
;
2398 if (!netif_carrier_ok(netdev
)) {
2399 hw_dbg(&adapter
->hw
, "NIC Link is Up, %u Gbps\n",
2400 (link_speed
== IXGBE_LINK_SPEED_10GB_FULL
) ?
2402 netif_carrier_on(netdev
);
2403 netif_tx_wake_all_queues(netdev
);
2405 /* Force detection of hung controller */
2406 adapter
->detect_tx_hung
= true;
2409 adapter
->link_up
= false;
2410 adapter
->link_speed
= 0;
2411 if (netif_carrier_ok(netdev
)) {
2412 hw_dbg(&adapter
->hw
, "NIC Link is Down\n");
2413 netif_carrier_off(netdev
);
2414 netif_tx_stop_all_queues(netdev
);
2418 ixgbevf_update_stats(adapter
);
2421 /* Force detection of hung controller every watchdog period */
2422 adapter
->detect_tx_hung
= true;
2424 /* Reset the timer */
2425 if (!test_bit(__IXGBEVF_DOWN
, &adapter
->state
))
2426 mod_timer(&adapter
->watchdog_timer
,
2427 round_jiffies(jiffies
+ (2 * HZ
)));
2429 adapter
->flags
&= ~IXGBE_FLAG_IN_WATCHDOG_TASK
;
2433 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2434 * @adapter: board private structure
2435 * @tx_ring: Tx descriptor ring for a specific queue
2437 * Free all transmit software resources
2439 void ixgbevf_free_tx_resources(struct ixgbevf_adapter
*adapter
,
2440 struct ixgbevf_ring
*tx_ring
)
2442 struct pci_dev
*pdev
= adapter
->pdev
;
2444 ixgbevf_clean_tx_ring(adapter
, tx_ring
);
2446 vfree(tx_ring
->tx_buffer_info
);
2447 tx_ring
->tx_buffer_info
= NULL
;
2449 dma_free_coherent(&pdev
->dev
, tx_ring
->size
, tx_ring
->desc
,
2452 tx_ring
->desc
= NULL
;
2456 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2457 * @adapter: board private structure
2459 * Free all transmit software resources
2461 static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter
*adapter
)
2465 for (i
= 0; i
< adapter
->num_tx_queues
; i
++)
2466 if (adapter
->tx_ring
[i
].desc
)
2467 ixgbevf_free_tx_resources(adapter
,
2468 &adapter
->tx_ring
[i
]);
2473 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2474 * @adapter: board private structure
2475 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2477 * Return 0 on success, negative on failure
2479 int ixgbevf_setup_tx_resources(struct ixgbevf_adapter
*adapter
,
2480 struct ixgbevf_ring
*tx_ring
)
2482 struct pci_dev
*pdev
= adapter
->pdev
;
2485 size
= sizeof(struct ixgbevf_tx_buffer
) * tx_ring
->count
;
2486 tx_ring
->tx_buffer_info
= vmalloc(size
);
2487 if (!tx_ring
->tx_buffer_info
)
2489 memset(tx_ring
->tx_buffer_info
, 0, size
);
2491 /* round up to nearest 4K */
2492 tx_ring
->size
= tx_ring
->count
* sizeof(union ixgbe_adv_tx_desc
);
2493 tx_ring
->size
= ALIGN(tx_ring
->size
, 4096);
2495 tx_ring
->desc
= dma_alloc_coherent(&pdev
->dev
, tx_ring
->size
,
2496 &tx_ring
->dma
, GFP_KERNEL
);
2500 tx_ring
->next_to_use
= 0;
2501 tx_ring
->next_to_clean
= 0;
2502 tx_ring
->work_limit
= tx_ring
->count
;
2506 vfree(tx_ring
->tx_buffer_info
);
2507 tx_ring
->tx_buffer_info
= NULL
;
2508 hw_dbg(&adapter
->hw
, "Unable to allocate memory for the transmit "
2509 "descriptor ring\n");
2514 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
2515 * @adapter: board private structure
2517 * If this function returns with an error, then it's possible one or
2518 * more of the rings is populated (while the rest are not). It is the
2519 * callers duty to clean those orphaned rings.
2521 * Return 0 on success, negative on failure
2523 static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter
*adapter
)
2527 for (i
= 0; i
< adapter
->num_tx_queues
; i
++) {
2528 err
= ixgbevf_setup_tx_resources(adapter
, &adapter
->tx_ring
[i
]);
2531 hw_dbg(&adapter
->hw
,
2532 "Allocation for Tx Queue %u failed\n", i
);
2540 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
2541 * @adapter: board private structure
2542 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2544 * Returns 0 on success, negative on failure
2546 int ixgbevf_setup_rx_resources(struct ixgbevf_adapter
*adapter
,
2547 struct ixgbevf_ring
*rx_ring
)
2549 struct pci_dev
*pdev
= adapter
->pdev
;
2552 size
= sizeof(struct ixgbevf_rx_buffer
) * rx_ring
->count
;
2553 rx_ring
->rx_buffer_info
= vmalloc(size
);
2554 if (!rx_ring
->rx_buffer_info
) {
2555 hw_dbg(&adapter
->hw
,
2556 "Unable to vmalloc buffer memory for "
2557 "the receive descriptor ring\n");
2560 memset(rx_ring
->rx_buffer_info
, 0, size
);
2562 /* Round up to nearest 4K */
2563 rx_ring
->size
= rx_ring
->count
* sizeof(union ixgbe_adv_rx_desc
);
2564 rx_ring
->size
= ALIGN(rx_ring
->size
, 4096);
2566 rx_ring
->desc
= dma_alloc_coherent(&pdev
->dev
, rx_ring
->size
,
2567 &rx_ring
->dma
, GFP_KERNEL
);
2569 if (!rx_ring
->desc
) {
2570 hw_dbg(&adapter
->hw
,
2571 "Unable to allocate memory for "
2572 "the receive descriptor ring\n");
2573 vfree(rx_ring
->rx_buffer_info
);
2574 rx_ring
->rx_buffer_info
= NULL
;
2578 rx_ring
->next_to_clean
= 0;
2579 rx_ring
->next_to_use
= 0;
2587 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
2588 * @adapter: board private structure
2590 * If this function returns with an error, then it's possible one or
2591 * more of the rings is populated (while the rest are not). It is the
2592 * callers duty to clean those orphaned rings.
2594 * Return 0 on success, negative on failure
2596 static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter
*adapter
)
2600 for (i
= 0; i
< adapter
->num_rx_queues
; i
++) {
2601 err
= ixgbevf_setup_rx_resources(adapter
, &adapter
->rx_ring
[i
]);
2604 hw_dbg(&adapter
->hw
,
2605 "Allocation for Rx Queue %u failed\n", i
);
2612 * ixgbevf_free_rx_resources - Free Rx Resources
2613 * @adapter: board private structure
2614 * @rx_ring: ring to clean the resources from
2616 * Free all receive software resources
2618 void ixgbevf_free_rx_resources(struct ixgbevf_adapter
*adapter
,
2619 struct ixgbevf_ring
*rx_ring
)
2621 struct pci_dev
*pdev
= adapter
->pdev
;
2623 ixgbevf_clean_rx_ring(adapter
, rx_ring
);
2625 vfree(rx_ring
->rx_buffer_info
);
2626 rx_ring
->rx_buffer_info
= NULL
;
2628 dma_free_coherent(&pdev
->dev
, rx_ring
->size
, rx_ring
->desc
,
2631 rx_ring
->desc
= NULL
;
2635 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
2636 * @adapter: board private structure
2638 * Free all receive software resources
2640 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter
*adapter
)
2644 for (i
= 0; i
< adapter
->num_rx_queues
; i
++)
2645 if (adapter
->rx_ring
[i
].desc
)
2646 ixgbevf_free_rx_resources(adapter
,
2647 &adapter
->rx_ring
[i
]);
2651 * ixgbevf_open - Called when a network interface is made active
2652 * @netdev: network interface device structure
2654 * Returns 0 on success, negative value on failure
2656 * The open entry point is called when a network interface is made
2657 * active by the system (IFF_UP). At this point all resources needed
2658 * for transmit and receive operations are allocated, the interrupt
2659 * handler is registered with the OS, the watchdog timer is started,
2660 * and the stack is notified that the interface is ready.
2662 static int ixgbevf_open(struct net_device
*netdev
)
2664 struct ixgbevf_adapter
*adapter
= netdev_priv(netdev
);
2665 struct ixgbe_hw
*hw
= &adapter
->hw
;
2668 /* disallow open during test */
2669 if (test_bit(__IXGBEVF_TESTING
, &adapter
->state
))
2672 if (hw
->adapter_stopped
) {
2673 ixgbevf_reset(adapter
);
2674 /* if adapter is still stopped then PF isn't up and
2675 * the vf can't start. */
2676 if (hw
->adapter_stopped
) {
2677 err
= IXGBE_ERR_MBX
;
2678 printk(KERN_ERR
"Unable to start - perhaps the PF"
2679 " Driver isn't up yet\n");
2680 goto err_setup_reset
;
2684 /* allocate transmit descriptors */
2685 err
= ixgbevf_setup_all_tx_resources(adapter
);
2689 /* allocate receive descriptors */
2690 err
= ixgbevf_setup_all_rx_resources(adapter
);
2694 ixgbevf_configure(adapter
);
2697 * Map the Tx/Rx rings to the vectors we were allotted.
2698 * if request_irq will be called in this function map_rings
2699 * must be called *before* up_complete
2701 ixgbevf_map_rings_to_vectors(adapter
);
2703 err
= ixgbevf_up_complete(adapter
);
2707 /* clear any pending interrupts, may auto mask */
2708 IXGBE_READ_REG(hw
, IXGBE_VTEICR
);
2709 err
= ixgbevf_request_irq(adapter
);
2713 ixgbevf_irq_enable(adapter
, true, true);
2718 ixgbevf_down(adapter
);
2720 ixgbevf_free_irq(adapter
);
2722 ixgbevf_free_all_rx_resources(adapter
);
2724 ixgbevf_free_all_tx_resources(adapter
);
2725 ixgbevf_reset(adapter
);
2733 * ixgbevf_close - Disables a network interface
2734 * @netdev: network interface device structure
2736 * Returns 0, this is not allowed to fail
2738 * The close entry point is called when an interface is de-activated
2739 * by the OS. The hardware is still under the drivers control, but
2740 * needs to be disabled. A global MAC reset is issued to stop the
2741 * hardware, and all transmit and receive resources are freed.
2743 static int ixgbevf_close(struct net_device
*netdev
)
2745 struct ixgbevf_adapter
*adapter
= netdev_priv(netdev
);
2747 ixgbevf_down(adapter
);
2748 ixgbevf_free_irq(adapter
);
2750 ixgbevf_free_all_tx_resources(adapter
);
2751 ixgbevf_free_all_rx_resources(adapter
);
2756 static int ixgbevf_tso(struct ixgbevf_adapter
*adapter
,
2757 struct ixgbevf_ring
*tx_ring
,
2758 struct sk_buff
*skb
, u32 tx_flags
, u8
*hdr_len
)
2760 struct ixgbe_adv_tx_context_desc
*context_desc
;
2763 struct ixgbevf_tx_buffer
*tx_buffer_info
;
2764 u32 vlan_macip_lens
= 0, type_tucmd_mlhl
;
2765 u32 mss_l4len_idx
, l4len
;
2767 if (skb_is_gso(skb
)) {
2768 if (skb_header_cloned(skb
)) {
2769 err
= pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
);
2773 l4len
= tcp_hdrlen(skb
);
2776 if (skb
->protocol
== htons(ETH_P_IP
)) {
2777 struct iphdr
*iph
= ip_hdr(skb
);
2780 tcp_hdr(skb
)->check
= ~csum_tcpudp_magic(iph
->saddr
,
2784 adapter
->hw_tso_ctxt
++;
2785 } else if (skb_is_gso_v6(skb
)) {
2786 ipv6_hdr(skb
)->payload_len
= 0;
2787 tcp_hdr(skb
)->check
=
2788 ~csum_ipv6_magic(&ipv6_hdr(skb
)->saddr
,
2789 &ipv6_hdr(skb
)->daddr
,
2791 adapter
->hw_tso6_ctxt
++;
2794 i
= tx_ring
->next_to_use
;
2796 tx_buffer_info
= &tx_ring
->tx_buffer_info
[i
];
2797 context_desc
= IXGBE_TX_CTXTDESC_ADV(*tx_ring
, i
);
2799 /* VLAN MACLEN IPLEN */
2800 if (tx_flags
& IXGBE_TX_FLAGS_VLAN
)
2802 (tx_flags
& IXGBE_TX_FLAGS_VLAN_MASK
);
2803 vlan_macip_lens
|= ((skb_network_offset(skb
)) <<
2804 IXGBE_ADVTXD_MACLEN_SHIFT
);
2805 *hdr_len
+= skb_network_offset(skb
);
2807 (skb_transport_header(skb
) - skb_network_header(skb
));
2809 (skb_transport_header(skb
) - skb_network_header(skb
));
2810 context_desc
->vlan_macip_lens
= cpu_to_le32(vlan_macip_lens
);
2811 context_desc
->seqnum_seed
= 0;
2813 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2814 type_tucmd_mlhl
= (IXGBE_TXD_CMD_DEXT
|
2815 IXGBE_ADVTXD_DTYP_CTXT
);
2817 if (skb
->protocol
== htons(ETH_P_IP
))
2818 type_tucmd_mlhl
|= IXGBE_ADVTXD_TUCMD_IPV4
;
2819 type_tucmd_mlhl
|= IXGBE_ADVTXD_TUCMD_L4T_TCP
;
2820 context_desc
->type_tucmd_mlhl
= cpu_to_le32(type_tucmd_mlhl
);
2824 (skb_shinfo(skb
)->gso_size
<< IXGBE_ADVTXD_MSS_SHIFT
);
2825 mss_l4len_idx
|= (l4len
<< IXGBE_ADVTXD_L4LEN_SHIFT
);
2826 /* use index 1 for TSO */
2827 mss_l4len_idx
|= (1 << IXGBE_ADVTXD_IDX_SHIFT
);
2828 context_desc
->mss_l4len_idx
= cpu_to_le32(mss_l4len_idx
);
2830 tx_buffer_info
->time_stamp
= jiffies
;
2831 tx_buffer_info
->next_to_watch
= i
;
2834 if (i
== tx_ring
->count
)
2836 tx_ring
->next_to_use
= i
;
2844 static bool ixgbevf_tx_csum(struct ixgbevf_adapter
*adapter
,
2845 struct ixgbevf_ring
*tx_ring
,
2846 struct sk_buff
*skb
, u32 tx_flags
)
2848 struct ixgbe_adv_tx_context_desc
*context_desc
;
2850 struct ixgbevf_tx_buffer
*tx_buffer_info
;
2851 u32 vlan_macip_lens
= 0, type_tucmd_mlhl
= 0;
2853 if (skb
->ip_summed
== CHECKSUM_PARTIAL
||
2854 (tx_flags
& IXGBE_TX_FLAGS_VLAN
)) {
2855 i
= tx_ring
->next_to_use
;
2856 tx_buffer_info
= &tx_ring
->tx_buffer_info
[i
];
2857 context_desc
= IXGBE_TX_CTXTDESC_ADV(*tx_ring
, i
);
2859 if (tx_flags
& IXGBE_TX_FLAGS_VLAN
)
2860 vlan_macip_lens
|= (tx_flags
&
2861 IXGBE_TX_FLAGS_VLAN_MASK
);
2862 vlan_macip_lens
|= (skb_network_offset(skb
) <<
2863 IXGBE_ADVTXD_MACLEN_SHIFT
);
2864 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
2865 vlan_macip_lens
|= (skb_transport_header(skb
) -
2866 skb_network_header(skb
));
2868 context_desc
->vlan_macip_lens
= cpu_to_le32(vlan_macip_lens
);
2869 context_desc
->seqnum_seed
= 0;
2871 type_tucmd_mlhl
|= (IXGBE_TXD_CMD_DEXT
|
2872 IXGBE_ADVTXD_DTYP_CTXT
);
2874 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
2875 switch (skb
->protocol
) {
2876 case __constant_htons(ETH_P_IP
):
2877 type_tucmd_mlhl
|= IXGBE_ADVTXD_TUCMD_IPV4
;
2878 if (ip_hdr(skb
)->protocol
== IPPROTO_TCP
)
2880 IXGBE_ADVTXD_TUCMD_L4T_TCP
;
2882 case __constant_htons(ETH_P_IPV6
):
2883 if (ipv6_hdr(skb
)->nexthdr
== IPPROTO_TCP
)
2885 IXGBE_ADVTXD_TUCMD_L4T_TCP
;
2888 if (unlikely(net_ratelimit())) {
2890 "partial checksum but "
2898 context_desc
->type_tucmd_mlhl
= cpu_to_le32(type_tucmd_mlhl
);
2899 /* use index zero for tx checksum offload */
2900 context_desc
->mss_l4len_idx
= 0;
2902 tx_buffer_info
->time_stamp
= jiffies
;
2903 tx_buffer_info
->next_to_watch
= i
;
2905 adapter
->hw_csum_tx_good
++;
2907 if (i
== tx_ring
->count
)
2909 tx_ring
->next_to_use
= i
;
2917 static int ixgbevf_tx_map(struct ixgbevf_adapter
*adapter
,
2918 struct ixgbevf_ring
*tx_ring
,
2919 struct sk_buff
*skb
, u32 tx_flags
,
2922 struct pci_dev
*pdev
= adapter
->pdev
;
2923 struct ixgbevf_tx_buffer
*tx_buffer_info
;
2925 unsigned int total
= skb
->len
;
2926 unsigned int offset
= 0, size
;
2928 unsigned int nr_frags
= skb_shinfo(skb
)->nr_frags
;
2932 i
= tx_ring
->next_to_use
;
2934 len
= min(skb_headlen(skb
), total
);
2936 tx_buffer_info
= &tx_ring
->tx_buffer_info
[i
];
2937 size
= min(len
, (unsigned int)IXGBE_MAX_DATA_PER_TXD
);
2939 tx_buffer_info
->length
= size
;
2940 tx_buffer_info
->mapped_as_page
= false;
2941 tx_buffer_info
->dma
= dma_map_single(&adapter
->pdev
->dev
,
2943 size
, DMA_TO_DEVICE
);
2944 if (dma_mapping_error(&pdev
->dev
, tx_buffer_info
->dma
))
2946 tx_buffer_info
->time_stamp
= jiffies
;
2947 tx_buffer_info
->next_to_watch
= i
;
2954 if (i
== tx_ring
->count
)
2958 for (f
= 0; f
< nr_frags
; f
++) {
2959 struct skb_frag_struct
*frag
;
2961 frag
= &skb_shinfo(skb
)->frags
[f
];
2962 len
= min((unsigned int)frag
->size
, total
);
2963 offset
= frag
->page_offset
;
2966 tx_buffer_info
= &tx_ring
->tx_buffer_info
[i
];
2967 size
= min(len
, (unsigned int)IXGBE_MAX_DATA_PER_TXD
);
2969 tx_buffer_info
->length
= size
;
2970 tx_buffer_info
->dma
= dma_map_page(&adapter
->pdev
->dev
,
2975 tx_buffer_info
->mapped_as_page
= true;
2976 if (dma_mapping_error(&pdev
->dev
, tx_buffer_info
->dma
))
2978 tx_buffer_info
->time_stamp
= jiffies
;
2979 tx_buffer_info
->next_to_watch
= i
;
2986 if (i
== tx_ring
->count
)
2994 i
= tx_ring
->count
- 1;
2997 tx_ring
->tx_buffer_info
[i
].skb
= skb
;
2998 tx_ring
->tx_buffer_info
[first
].next_to_watch
= i
;
3003 dev_err(&pdev
->dev
, "TX DMA map failed\n");
3005 /* clear timestamp and dma mappings for failed tx_buffer_info map */
3006 tx_buffer_info
->dma
= 0;
3007 tx_buffer_info
->time_stamp
= 0;
3008 tx_buffer_info
->next_to_watch
= 0;
3011 /* clear timestamp and dma mappings for remaining portion of packet */
3012 while (count
>= 0) {
3016 i
+= tx_ring
->count
;
3017 tx_buffer_info
= &tx_ring
->tx_buffer_info
[i
];
3018 ixgbevf_unmap_and_free_tx_resource(adapter
, tx_buffer_info
);
3024 static void ixgbevf_tx_queue(struct ixgbevf_adapter
*adapter
,
3025 struct ixgbevf_ring
*tx_ring
, int tx_flags
,
3026 int count
, u32 paylen
, u8 hdr_len
)
3028 union ixgbe_adv_tx_desc
*tx_desc
= NULL
;
3029 struct ixgbevf_tx_buffer
*tx_buffer_info
;
3030 u32 olinfo_status
= 0, cmd_type_len
= 0;
3033 u32 txd_cmd
= IXGBE_TXD_CMD_EOP
| IXGBE_TXD_CMD_RS
| IXGBE_TXD_CMD_IFCS
;
3035 cmd_type_len
|= IXGBE_ADVTXD_DTYP_DATA
;
3037 cmd_type_len
|= IXGBE_ADVTXD_DCMD_IFCS
| IXGBE_ADVTXD_DCMD_DEXT
;
3039 if (tx_flags
& IXGBE_TX_FLAGS_VLAN
)
3040 cmd_type_len
|= IXGBE_ADVTXD_DCMD_VLE
;
3042 if (tx_flags
& IXGBE_TX_FLAGS_TSO
) {
3043 cmd_type_len
|= IXGBE_ADVTXD_DCMD_TSE
;
3045 olinfo_status
|= IXGBE_TXD_POPTS_TXSM
<<
3046 IXGBE_ADVTXD_POPTS_SHIFT
;
3048 /* use index 1 context for tso */
3049 olinfo_status
|= (1 << IXGBE_ADVTXD_IDX_SHIFT
);
3050 if (tx_flags
& IXGBE_TX_FLAGS_IPV4
)
3051 olinfo_status
|= IXGBE_TXD_POPTS_IXSM
<<
3052 IXGBE_ADVTXD_POPTS_SHIFT
;
3054 } else if (tx_flags
& IXGBE_TX_FLAGS_CSUM
)
3055 olinfo_status
|= IXGBE_TXD_POPTS_TXSM
<<
3056 IXGBE_ADVTXD_POPTS_SHIFT
;
3058 olinfo_status
|= ((paylen
- hdr_len
) << IXGBE_ADVTXD_PAYLEN_SHIFT
);
3060 i
= tx_ring
->next_to_use
;
3062 tx_buffer_info
= &tx_ring
->tx_buffer_info
[i
];
3063 tx_desc
= IXGBE_TX_DESC_ADV(*tx_ring
, i
);
3064 tx_desc
->read
.buffer_addr
= cpu_to_le64(tx_buffer_info
->dma
);
3065 tx_desc
->read
.cmd_type_len
=
3066 cpu_to_le32(cmd_type_len
| tx_buffer_info
->length
);
3067 tx_desc
->read
.olinfo_status
= cpu_to_le32(olinfo_status
);
3069 if (i
== tx_ring
->count
)
3073 tx_desc
->read
.cmd_type_len
|= cpu_to_le32(txd_cmd
);
3076 * Force memory writes to complete before letting h/w
3077 * know there are new descriptors to fetch. (Only
3078 * applicable for weak-ordered memory model archs,
3083 tx_ring
->next_to_use
= i
;
3084 writel(i
, adapter
->hw
.hw_addr
+ tx_ring
->tail
);
3087 static int __ixgbevf_maybe_stop_tx(struct net_device
*netdev
,
3088 struct ixgbevf_ring
*tx_ring
, int size
)
3090 struct ixgbevf_adapter
*adapter
= netdev_priv(netdev
);
3092 netif_stop_subqueue(netdev
, tx_ring
->queue_index
);
3093 /* Herbert's original patch had:
3094 * smp_mb__after_netif_stop_queue();
3095 * but since that doesn't exist yet, just open code it. */
3098 /* We need to check again in a case another CPU has just
3099 * made room available. */
3100 if (likely(IXGBE_DESC_UNUSED(tx_ring
) < size
))
3103 /* A reprieve! - use start_queue because it doesn't call schedule */
3104 netif_start_subqueue(netdev
, tx_ring
->queue_index
);
3105 ++adapter
->restart_queue
;
3109 static int ixgbevf_maybe_stop_tx(struct net_device
*netdev
,
3110 struct ixgbevf_ring
*tx_ring
, int size
)
3112 if (likely(IXGBE_DESC_UNUSED(tx_ring
) >= size
))
3114 return __ixgbevf_maybe_stop_tx(netdev
, tx_ring
, size
);
3117 static int ixgbevf_xmit_frame(struct sk_buff
*skb
, struct net_device
*netdev
)
3119 struct ixgbevf_adapter
*adapter
= netdev_priv(netdev
);
3120 struct ixgbevf_ring
*tx_ring
;
3122 unsigned int tx_flags
= 0;
3129 tx_ring
= &adapter
->tx_ring
[r_idx
];
3131 if (adapter
->vlgrp
&& vlan_tx_tag_present(skb
)) {
3132 tx_flags
|= vlan_tx_tag_get(skb
);
3133 tx_flags
<<= IXGBE_TX_FLAGS_VLAN_SHIFT
;
3134 tx_flags
|= IXGBE_TX_FLAGS_VLAN
;
3137 /* four things can cause us to need a context descriptor */
3138 if (skb_is_gso(skb
) ||
3139 (skb
->ip_summed
== CHECKSUM_PARTIAL
) ||
3140 (tx_flags
& IXGBE_TX_FLAGS_VLAN
))
3143 count
+= TXD_USE_COUNT(skb_headlen(skb
));
3144 for (f
= 0; f
< skb_shinfo(skb
)->nr_frags
; f
++)
3145 count
+= TXD_USE_COUNT(skb_shinfo(skb
)->frags
[f
].size
);
3147 if (ixgbevf_maybe_stop_tx(netdev
, tx_ring
, count
)) {
3149 return NETDEV_TX_BUSY
;
3152 first
= tx_ring
->next_to_use
;
3154 if (skb
->protocol
== htons(ETH_P_IP
))
3155 tx_flags
|= IXGBE_TX_FLAGS_IPV4
;
3156 tso
= ixgbevf_tso(adapter
, tx_ring
, skb
, tx_flags
, &hdr_len
);
3158 dev_kfree_skb_any(skb
);
3159 return NETDEV_TX_OK
;
3163 tx_flags
|= IXGBE_TX_FLAGS_TSO
;
3164 else if (ixgbevf_tx_csum(adapter
, tx_ring
, skb
, tx_flags
) &&
3165 (skb
->ip_summed
== CHECKSUM_PARTIAL
))
3166 tx_flags
|= IXGBE_TX_FLAGS_CSUM
;
3168 ixgbevf_tx_queue(adapter
, tx_ring
, tx_flags
,
3169 ixgbevf_tx_map(adapter
, tx_ring
, skb
, tx_flags
, first
),
3172 ixgbevf_maybe_stop_tx(netdev
, tx_ring
, DESC_NEEDED
);
3174 return NETDEV_TX_OK
;
3178 * ixgbevf_get_stats - Get System Network Statistics
3179 * @netdev: network interface device structure
3181 * Returns the address of the device statistics structure.
3182 * The statistics are actually updated from the timer callback.
3184 static struct net_device_stats
*ixgbevf_get_stats(struct net_device
*netdev
)
3186 struct ixgbevf_adapter
*adapter
= netdev_priv(netdev
);
3188 /* only return the current stats */
3189 return &adapter
->net_stats
;
3193 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
3194 * @netdev: network interface device structure
3195 * @p: pointer to an address structure
3197 * Returns 0 on success, negative on failure
3199 static int ixgbevf_set_mac(struct net_device
*netdev
, void *p
)
3201 struct ixgbevf_adapter
*adapter
= netdev_priv(netdev
);
3202 struct ixgbe_hw
*hw
= &adapter
->hw
;
3203 struct sockaddr
*addr
= p
;
3205 if (!is_valid_ether_addr(addr
->sa_data
))
3206 return -EADDRNOTAVAIL
;
3208 memcpy(netdev
->dev_addr
, addr
->sa_data
, netdev
->addr_len
);
3209 memcpy(hw
->mac
.addr
, addr
->sa_data
, netdev
->addr_len
);
3211 if (hw
->mac
.ops
.set_rar
)
3212 hw
->mac
.ops
.set_rar(hw
, 0, hw
->mac
.addr
, 0);
3218 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
3219 * @netdev: network interface device structure
3220 * @new_mtu: new value for maximum frame size
3222 * Returns 0 on success, negative on failure
3224 static int ixgbevf_change_mtu(struct net_device
*netdev
, int new_mtu
)
3226 struct ixgbevf_adapter
*adapter
= netdev_priv(netdev
);
3227 int max_frame
= new_mtu
+ ETH_HLEN
+ ETH_FCS_LEN
;
3229 /* MTU < 68 is an error and causes problems on some kernels */
3230 if ((new_mtu
< 68) || (max_frame
> MAXIMUM_ETHERNET_VLAN_SIZE
))
3233 hw_dbg(&adapter
->hw
, "changing MTU from %d to %d\n",
3234 netdev
->mtu
, new_mtu
);
3235 /* must set new MTU before calling down or up */
3236 netdev
->mtu
= new_mtu
;
3238 if (netif_running(netdev
))
3239 ixgbevf_reinit_locked(adapter
);
3244 static void ixgbevf_shutdown(struct pci_dev
*pdev
)
3246 struct net_device
*netdev
= pci_get_drvdata(pdev
);
3247 struct ixgbevf_adapter
*adapter
= netdev_priv(netdev
);
3249 netif_device_detach(netdev
);
3251 if (netif_running(netdev
)) {
3252 ixgbevf_down(adapter
);
3253 ixgbevf_free_irq(adapter
);
3254 ixgbevf_free_all_tx_resources(adapter
);
3255 ixgbevf_free_all_rx_resources(adapter
);
3259 pci_save_state(pdev
);
3262 pci_disable_device(pdev
);
3265 static const struct net_device_ops ixgbe_netdev_ops
= {
3266 .ndo_open
= &ixgbevf_open
,
3267 .ndo_stop
= &ixgbevf_close
,
3268 .ndo_start_xmit
= &ixgbevf_xmit_frame
,
3269 .ndo_get_stats
= &ixgbevf_get_stats
,
3270 .ndo_set_rx_mode
= &ixgbevf_set_rx_mode
,
3271 .ndo_set_multicast_list
= &ixgbevf_set_rx_mode
,
3272 .ndo_validate_addr
= eth_validate_addr
,
3273 .ndo_set_mac_address
= &ixgbevf_set_mac
,
3274 .ndo_change_mtu
= &ixgbevf_change_mtu
,
3275 .ndo_tx_timeout
= &ixgbevf_tx_timeout
,
3276 .ndo_vlan_rx_register
= &ixgbevf_vlan_rx_register
,
3277 .ndo_vlan_rx_add_vid
= &ixgbevf_vlan_rx_add_vid
,
3278 .ndo_vlan_rx_kill_vid
= &ixgbevf_vlan_rx_kill_vid
,
3281 static void ixgbevf_assign_netdev_ops(struct net_device
*dev
)
3283 struct ixgbevf_adapter
*adapter
;
3284 adapter
= netdev_priv(dev
);
3285 dev
->netdev_ops
= &ixgbe_netdev_ops
;
3286 ixgbevf_set_ethtool_ops(dev
);
3287 dev
->watchdog_timeo
= 5 * HZ
;
3291 * ixgbevf_probe - Device Initialization Routine
3292 * @pdev: PCI device information struct
3293 * @ent: entry in ixgbevf_pci_tbl
3295 * Returns 0 on success, negative on failure
3297 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
3298 * The OS initialization, configuring of the adapter private structure,
3299 * and a hardware reset occur.
3301 static int __devinit
ixgbevf_probe(struct pci_dev
*pdev
,
3302 const struct pci_device_id
*ent
)
3304 struct net_device
*netdev
;
3305 struct ixgbevf_adapter
*adapter
= NULL
;
3306 struct ixgbe_hw
*hw
= NULL
;
3307 const struct ixgbevf_info
*ii
= ixgbevf_info_tbl
[ent
->driver_data
];
3308 static int cards_found
;
3309 int err
, pci_using_dac
;
3311 err
= pci_enable_device(pdev
);
3315 if (!dma_set_mask(&pdev
->dev
, DMA_BIT_MASK(64)) &&
3316 !dma_set_coherent_mask(&pdev
->dev
, DMA_BIT_MASK(64))) {
3319 err
= dma_set_mask(&pdev
->dev
, DMA_BIT_MASK(32));
3321 err
= dma_set_coherent_mask(&pdev
->dev
,
3324 dev_err(&pdev
->dev
, "No usable DMA "
3325 "configuration, aborting\n");
3332 err
= pci_request_regions(pdev
, ixgbevf_driver_name
);
3334 dev_err(&pdev
->dev
, "pci_request_regions failed 0x%x\n", err
);
3338 pci_set_master(pdev
);
3341 netdev
= alloc_etherdev_mq(sizeof(struct ixgbevf_adapter
),
3344 netdev
= alloc_etherdev(sizeof(struct ixgbevf_adapter
));
3348 goto err_alloc_etherdev
;
3351 SET_NETDEV_DEV(netdev
, &pdev
->dev
);
3353 pci_set_drvdata(pdev
, netdev
);
3354 adapter
= netdev_priv(netdev
);
3356 adapter
->netdev
= netdev
;
3357 adapter
->pdev
= pdev
;
3360 adapter
->msg_enable
= (1 << DEFAULT_DEBUG_LEVEL_SHIFT
) - 1;
3363 * call save state here in standalone driver because it relies on
3364 * adapter struct to exist, and needs to call netdev_priv
3366 pci_save_state(pdev
);
3368 hw
->hw_addr
= ioremap(pci_resource_start(pdev
, 0),
3369 pci_resource_len(pdev
, 0));
3375 ixgbevf_assign_netdev_ops(netdev
);
3377 adapter
->bd_number
= cards_found
;
3380 memcpy(&hw
->mac
.ops
, ii
->mac_ops
, sizeof(hw
->mac
.ops
));
3381 hw
->mac
.type
= ii
->mac
;
3383 memcpy(&hw
->mbx
.ops
, &ixgbevf_mbx_ops
,
3384 sizeof(struct ixgbe_mac_operations
));
3386 adapter
->flags
&= ~IXGBE_FLAG_RX_PS_CAPABLE
;
3387 adapter
->flags
&= ~IXGBE_FLAG_RX_PS_ENABLED
;
3388 adapter
->flags
|= IXGBE_FLAG_RX_1BUF_CAPABLE
;
3390 /* setup the private structure */
3391 err
= ixgbevf_sw_init(adapter
);
3393 netdev
->features
= NETIF_F_SG
|
3395 NETIF_F_HW_VLAN_TX
|
3396 NETIF_F_HW_VLAN_RX
|
3397 NETIF_F_HW_VLAN_FILTER
;
3399 netdev
->features
|= NETIF_F_IPV6_CSUM
;
3400 netdev
->features
|= NETIF_F_TSO
;
3401 netdev
->features
|= NETIF_F_TSO6
;
3402 netdev
->features
|= NETIF_F_GRO
;
3403 netdev
->vlan_features
|= NETIF_F_TSO
;
3404 netdev
->vlan_features
|= NETIF_F_TSO6
;
3405 netdev
->vlan_features
|= NETIF_F_IP_CSUM
;
3406 netdev
->vlan_features
|= NETIF_F_IPV6_CSUM
;
3407 netdev
->vlan_features
|= NETIF_F_SG
;
3410 netdev
->features
|= NETIF_F_HIGHDMA
;
3412 /* The HW MAC address was set and/or determined in sw_init */
3413 memcpy(netdev
->dev_addr
, adapter
->hw
.mac
.addr
, netdev
->addr_len
);
3414 memcpy(netdev
->perm_addr
, adapter
->hw
.mac
.addr
, netdev
->addr_len
);
3416 if (!is_valid_ether_addr(netdev
->dev_addr
)) {
3417 printk(KERN_ERR
"invalid MAC address\n");
3422 init_timer(&adapter
->watchdog_timer
);
3423 adapter
->watchdog_timer
.function
= &ixgbevf_watchdog
;
3424 adapter
->watchdog_timer
.data
= (unsigned long)adapter
;
3426 INIT_WORK(&adapter
->reset_task
, ixgbevf_reset_task
);
3427 INIT_WORK(&adapter
->watchdog_task
, ixgbevf_watchdog_task
);
3429 err
= ixgbevf_init_interrupt_scheme(adapter
);
3433 /* pick up the PCI bus settings for reporting later */
3434 if (hw
->mac
.ops
.get_bus_info
)
3435 hw
->mac
.ops
.get_bus_info(hw
);
3438 netif_carrier_off(netdev
);
3439 netif_tx_stop_all_queues(netdev
);
3441 strcpy(netdev
->name
, "eth%d");
3443 err
= register_netdev(netdev
);
3447 adapter
->netdev_registered
= true;
3449 ixgbevf_init_last_counter_stats(adapter
);
3451 /* print the MAC address */
3452 hw_dbg(hw
, "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
3453 netdev
->dev_addr
[0],
3454 netdev
->dev_addr
[1],
3455 netdev
->dev_addr
[2],
3456 netdev
->dev_addr
[3],
3457 netdev
->dev_addr
[4],
3458 netdev
->dev_addr
[5]);
3460 hw_dbg(hw
, "MAC: %d\n", hw
->mac
.type
);
3462 hw_dbg(hw
, "LRO is disabled\n");
3464 hw_dbg(hw
, "Intel(R) 82599 Virtual Function\n");
3470 ixgbevf_reset_interrupt_capability(adapter
);
3471 iounmap(hw
->hw_addr
);
3473 free_netdev(netdev
);
3475 pci_release_regions(pdev
);
3478 pci_disable_device(pdev
);
3483 * ixgbevf_remove - Device Removal Routine
3484 * @pdev: PCI device information struct
3486 * ixgbevf_remove is called by the PCI subsystem to alert the driver
3487 * that it should release a PCI device. The could be caused by a
3488 * Hot-Plug event, or because the driver is going to be removed from
3491 static void __devexit
ixgbevf_remove(struct pci_dev
*pdev
)
3493 struct net_device
*netdev
= pci_get_drvdata(pdev
);
3494 struct ixgbevf_adapter
*adapter
= netdev_priv(netdev
);
3496 set_bit(__IXGBEVF_DOWN
, &adapter
->state
);
3498 del_timer_sync(&adapter
->watchdog_timer
);
3500 cancel_work_sync(&adapter
->watchdog_task
);
3502 flush_scheduled_work();
3504 if (adapter
->netdev_registered
) {
3505 unregister_netdev(netdev
);
3506 adapter
->netdev_registered
= false;
3509 ixgbevf_reset_interrupt_capability(adapter
);
3511 iounmap(adapter
->hw
.hw_addr
);
3512 pci_release_regions(pdev
);
3514 hw_dbg(&adapter
->hw
, "Remove complete\n");
3516 kfree(adapter
->tx_ring
);
3517 kfree(adapter
->rx_ring
);
3519 free_netdev(netdev
);
3521 pci_disable_device(pdev
);
3524 static struct pci_driver ixgbevf_driver
= {
3525 .name
= ixgbevf_driver_name
,
3526 .id_table
= ixgbevf_pci_tbl
,
3527 .probe
= ixgbevf_probe
,
3528 .remove
= __devexit_p(ixgbevf_remove
),
3529 .shutdown
= ixgbevf_shutdown
,
3533 * ixgbe_init_module - Driver Registration Routine
3535 * ixgbe_init_module is the first routine called when the driver is
3536 * loaded. All it does is register with the PCI subsystem.
3538 static int __init
ixgbevf_init_module(void)
3541 printk(KERN_INFO
"ixgbevf: %s - version %s\n", ixgbevf_driver_string
,
3542 ixgbevf_driver_version
);
3544 printk(KERN_INFO
"%s\n", ixgbevf_copyright
);
3546 ret
= pci_register_driver(&ixgbevf_driver
);
3550 module_init(ixgbevf_init_module
);
3553 * ixgbe_exit_module - Driver Exit Cleanup Routine
3555 * ixgbe_exit_module is called just before the driver is removed
3558 static void __exit
ixgbevf_exit_module(void)
3560 pci_unregister_driver(&ixgbevf_driver
);
3565 * ixgbe_get_hw_dev_name - return device name string
3566 * used by hardware layer to print debugging information
3568 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw
*hw
)
3570 struct ixgbevf_adapter
*adapter
= hw
->back
;
3571 return adapter
->netdev
->name
;
3575 module_exit(ixgbevf_exit_module
);
3577 /* ixgbevf_main.c */