/*******************************************************************************

  Intel(R) 82576 Virtual Function Linux driver
  Copyright(c) 2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pm_qos_params.h>

#include "igbvf.h"

#define DRV_VERSION "1.0.0-k0"
char igbvf_driver_name[] = "igbvf";
const char igbvf_driver_version[] = DRV_VERSION;
static const char igbvf_driver_string[] =
			"Intel(R) Virtual Function Network Driver";
static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
static int igbvf_poll(struct napi_struct *napi, int budget);
static void igbvf_reset(struct igbvf_adapter *);
static void igbvf_set_interrupt_capability(struct igbvf_adapter *);
static void igbvf_reset_interrupt_capability(struct igbvf_adapter *);

static struct igbvf_info igbvf_vf_info = {
	.flags                  = FLAG_HAS_JUMBO_FRAMES
	                          | FLAG_RX_CSUM_ENABLED,
	.init_ops               = e1000_init_function_pointers_vf,
};

static const struct igbvf_info *igbvf_info_tbl[] = {
	[board_vf] = &igbvf_vf_info,
};
/**
 * igbvf_desc_unused - calculate if we have unused descriptors
 **/
static int igbvf_desc_unused(struct igbvf_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
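/* Worked example: with count = 256, next_to_clean = 10 and next_to_use = 250
 * the first branch is not taken (10 < 250), so the function returns
 * 256 + 10 - 250 - 1 = 15 unused descriptors.  The "- 1" keeps one slot
 * permanently empty so that next_to_use == next_to_clean unambiguously
 * means "ring empty" rather than "ring full".
 */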
/**
 * igbvf_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to stack
 **/
static void igbvf_receive_skb(struct igbvf_adapter *adapter,
                              struct net_device *netdev,
                              struct sk_buff *skb,
                              u32 status, u16 vlan)
{
	if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
		vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
		                         le16_to_cpu(vlan) &
		                         E1000_RXD_SPC_VLAN_MASK);
	else
		netif_receive_skb(skb);

	netdev->last_rx = jiffies;
}

static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
                                         u32 status_err, struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
	if ((status_err & E1000_RXD_STAT_IXSM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (status_err &
	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	adapter->hw_csum_good++;
}
/**
 * igbvf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: address of ring structure to repopulate
 * @cleaned_count: number of buffers to repopulate
 **/
static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
                                   int cleaned_count)
{
	struct igbvf_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_adv_rx_desc *rx_desc;
	struct igbvf_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	int bufsz;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	if (adapter->rx_ps_hdr_size)
		bufsz = adapter->rx_ps_hdr_size;
	else
		bufsz = adapter->rx_buffer_len;
	bufsz += NET_IP_ALIGN;

	while (cleaned_count--) {
		rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);

		if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
			if (!buffer_info->page) {
				buffer_info->page = alloc_page(GFP_ATOMIC);
				if (!buffer_info->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				buffer_info->page_offset = 0;
			} else {
				buffer_info->page_offset ^= PAGE_SIZE / 2;
			}
			buffer_info->page_dma =
				pci_map_page(pdev, buffer_info->page,
				             buffer_info->page_offset,
				             PAGE_SIZE / 2,
				             PCI_DMA_FROMDEVICE);
		}

		if (!buffer_info->skb) {
			skb = netdev_alloc_skb(netdev, bufsz);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			/* Make buffer alignment 2 beyond a 16 byte boundary
			 * this will result in a 16 byte aligned IP header after
			 * the 14 byte MAC header is removed
			 */
			skb_reserve(skb, NET_IP_ALIGN);

			buffer_info->skb = skb;
			buffer_info->dma = pci_map_single(pdev, skb->data,
			                                  bufsz,
			                                  PCI_DMA_FROMDEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (adapter->rx_ps_hdr_size) {
			rx_desc->read.pkt_addr =
			     cpu_to_le64(buffer_info->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr =
			     cpu_to_le64(buffer_info->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i == 0)
			i = (rx_ring->count - 1);
		else
			i--;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
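/* Note on the tail write above: next_to_use points at the first descriptor
 * that was not refilled, while the value published to the tail register is
 * backed up by one.  Combined with the one-slot gap maintained by
 * igbvf_desc_unused(), this should keep the hardware from ever fetching a
 * descriptor the driver has not finished initializing.
 */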
/**
 * igbvf_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
                               int *work_done, int work_to_do)
{
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_adv_rx_desc *rx_desc, *next_rxd;
	struct igbvf_buffer *buffer_info, *next_buffer;
	struct sk_buff *skb;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i;
	u32 length, hlen, staterr;

	i = rx_ring->next_to_clean;
	rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		buffer_info = &rx_ring->buffer_info[i];

		/* HW will not DMA in data larger than the given buffer, even
		 * if it parses the (NFS, of course) header to be larger.  In
		 * that case, it fills the header buffer and spills the rest
		 * into the page.
		 */
		hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) &
		  E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
		if (hlen > adapter->rx_ps_hdr_size)
			hlen = adapter->rx_ps_hdr_size;

		length = le16_to_cpu(rx_desc->wb.upper.length);
		cleaned = true;
		cleaned_count++;

		skb = buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		buffer_info->skb = NULL;
		if (!adapter->rx_ps_hdr_size) {
			pci_unmap_single(pdev, buffer_info->dma,
			                 adapter->rx_buffer_len,
			                 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
			skb_put(skb, length);
			goto send_up;
		}

		if (!skb_shinfo(skb)->nr_frags) {
			pci_unmap_single(pdev, buffer_info->dma,
			                 adapter->rx_ps_hdr_size + NET_IP_ALIGN,
			                 PCI_DMA_FROMDEVICE);
			skb_put(skb, hlen);
		}

		if (length) {
			pci_unmap_page(pdev, buffer_info->page_dma,
			               PAGE_SIZE / 2,
			               PCI_DMA_FROMDEVICE);
			buffer_info->page_dma = 0;

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
			                   buffer_info->page,
			                   buffer_info->page_offset,
			                   length);

			if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
			    (page_count(buffer_info->page) != 1))
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}
send_up:
		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = IGBVF_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		next_buffer = &rx_ring->buffer_info[i];

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		total_bytes += skb->len;
		total_packets++;

		igbvf_rx_checksum_adv(adapter, staterr, skb);

		skb->protocol = eth_type_trans(skb, netdev);

		igbvf_receive_skb(adapter, netdev, skb, staterr,
		                  rx_desc->wb.upper.vlan);

		netdev->last_rx = jiffies;

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGBVF_RX_BUFFER_WRITE) {
			igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = igbvf_desc_unused(rx_ring);

	if (cleaned_count)
		igbvf_alloc_rx_buffers(rx_ring, cleaned_count);

	adapter->total_rx_packets += total_packets;
	adapter->total_rx_bytes += total_bytes;
	adapter->net_stats.rx_bytes += total_bytes;
	adapter->net_stats.rx_packets += total_packets;
	return cleaned;
}
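/* Receive flow summary: in packet-split mode each frame arrives as an skb
 * holding up to rx_ps_hdr_size bytes of header plus zero or more half-page
 * fragments attached via skb_fill_page_desc().  Descriptors without EOP
 * only carry payload, so their skb/dma pointers are handed forward to the
 * next buffer_info until the terminating descriptor is reached; only then
 * is the assembled skb checksummed and pushed up the stack.
 */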
static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
                            struct igbvf_buffer *buffer_info)
{
	buffer_info->dma = 0;
	if (buffer_info->skb) {
		skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
		              DMA_TO_DEVICE);
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
}

static void igbvf_print_tx_hang(struct igbvf_adapter *adapter)
{
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	unsigned int i = tx_ring->next_to_clean;
	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
	union e1000_adv_tx_desc *eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);

	/* detected Tx unit hang */
	dev_err(&adapter->pdev->dev,
	        "Detected Tx Unit Hang:\n"
	        "  TDH                  <%x>\n"
	        "  TDT                  <%x>\n"
	        "  next_to_use          <%x>\n"
	        "  next_to_clean        <%x>\n"
	        "buffer_info[next_to_clean]:\n"
	        "  time_stamp           <%lx>\n"
	        "  next_to_watch        <%x>\n"
	        "  jiffies              <%lx>\n"
	        "  next_to_watch.status <%x>\n",
	        readl(adapter->hw.hw_addr + tx_ring->head),
	        readl(adapter->hw.hw_addr + tx_ring->tail),
	        tx_ring->next_to_use,
	        tx_ring->next_to_clean,
	        tx_ring->buffer_info[eop].time_stamp,
	        eop,
	        jiffies,
	        eop_desc->wb.status);
}
/**
 * igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
                             struct igbvf_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct igbvf_buffer) * tx_ring->count;
	tx_ring->buffer_info = vmalloc(size);
	if (!tx_ring->buffer_info)
		goto err;
	memset(tx_ring->buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
	                                     &tx_ring->dma);
	if (!tx_ring->desc)
		goto err;

	tx_ring->adapter = adapter;
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;
err:
	vfree(tx_ring->buffer_info);
	dev_err(&adapter->pdev->dev,
	        "Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}

/**
 * igbvf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
                             struct igbvf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct igbvf_buffer) * rx_ring->count;
	rx_ring->buffer_info = vmalloc(size);
	if (!rx_ring->buffer_info)
		goto err;
	memset(rx_ring->buffer_info, 0, size);

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
	                                     &rx_ring->dma);
	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	rx_ring->adapter = adapter;

	return 0;

err:
	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
	dev_err(&adapter->pdev->dev,
	        "Unable to allocate memory for the receive descriptor ring\n");
	return -ENOMEM;
}
/**
 * igbvf_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void igbvf_clean_tx_ring(struct igbvf_ring *tx_ring)
{
	struct igbvf_adapter *adapter = tx_ring->adapter;
	struct igbvf_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		igbvf_put_txbuf(adapter, buffer_info);
	}

	size = sizeof(struct igbvf_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->head);
	writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * igbvf_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: ring to free resources from
 *
 * Free all transmit software resources
 **/
void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
{
	struct pci_dev *pdev = tx_ring->adapter->pdev;

	igbvf_clean_tx_ring(tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}
/**
 * igbvf_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 **/
static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
{
	struct igbvf_adapter *adapter = rx_ring->adapter;
	struct igbvf_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->rx_ps_hdr_size)
				pci_unmap_single(pdev, buffer_info->dma,
				                 adapter->rx_ps_hdr_size,
				                 PCI_DMA_FROMDEVICE);
			else
				pci_unmap_single(pdev, buffer_info->dma,
				                 adapter->rx_buffer_len,
				                 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		if (buffer_info->page) {
			if (buffer_info->page_dma)
				pci_unmap_page(pdev, buffer_info->page_dma,
				               PAGE_SIZE / 2,
				               PCI_DMA_FROMDEVICE);
			put_page(buffer_info->page);
			buffer_info->page = NULL;
			buffer_info->page_dma = 0;
			buffer_info->page_offset = 0;
		}
	}

	size = sizeof(struct igbvf_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

/**
 * igbvf_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
{
	struct pci_dev *pdev = rx_ring->adapter->pdev;

	igbvf_clean_rx_ring(rx_ring);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
	                  rx_ring->dma);
	rx_ring->desc = NULL;
}
/**
 * igbvf_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 *      Stores a new ITR value based on packets and byte counts during the
 *      last interrupt.  The advantage of per interrupt computation is
 *      faster updates and more accurate ITR for the current traffic
 *      pattern.  Constants in this function were computed based on
 *      theoretical maximum wire speed and thresholds were set based on
 *      testing data as well as attempting to minimize response time while
 *      increasing bulk throughput.  This functionality is controlled by the
 *      InterruptThrottleRate module parameter.
 **/
static unsigned int igbvf_update_itr(struct igbvf_adapter *adapter,
                                     u16 itr_setting, int packets,
                                     int bytes)
{
	unsigned int retval = itr_setting;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000)
				retval = bulk_latency;
			else if ((packets < 10) || ((bytes/packets) > 1200))
				retval = bulk_latency;
			else if ((packets > 35))
				retval = lowest_latency;
		} else if (bytes/packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 6000) {
			retval = low_latency;
		}
		break;
	}

update_itr_done:
	return retval;
}
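/* The three latency classes map to target interrupt rates roughly as
 * follows: lowest_latency for small, latency-sensitive traffic,
 * low_latency (~20000 ints/s) for mixed load, and bulk_latency
 * (~4000 ints/s) once large transfers dominate - e.g. an interval
 * averaging more than 8000 bytes per packet is treated as TSO/jumbo
 * traffic and demoted straight to bulk_latency.
 */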
static void igbvf_set_itr(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 current_itr;
	u32 new_itr = adapter->itr;

	adapter->tx_itr = igbvf_update_itr(adapter, adapter->tx_itr,
	                                   adapter->total_tx_packets,
	                                   adapter->total_tx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
		adapter->tx_itr = low_latency;

	adapter->rx_itr = igbvf_update_itr(adapter, adapter->rx_itr,
	                                   adapter->total_rx_packets,
	                                   adapter->total_rx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
		adapter->rx_itr = low_latency;

	current_itr = max(adapter->rx_itr, adapter->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 70000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 4000;
		break;
	default:
		break;
	}

	if (new_itr != adapter->itr) {
		/*
		 * this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
		new_itr = new_itr > adapter->itr ?
		          min(adapter->itr + (new_itr >> 2), new_itr) :
		          new_itr;
		adapter->itr = new_itr;
		adapter->rx_ring->itr_val = 1952;

		if (adapter->msix_entries)
			adapter->rx_ring->set_itr = 1;
	}
}
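/* Example of the ramp-up damping above: with adapter->itr at 4000 and a
 * requested new_itr of 20000, the rate is raised only to
 * min(4000 + (20000 >> 2), 20000) = 9000 on this pass, approaching 20000
 * over several intervals, while decreases take effect at once.
 */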
/**
 * igbvf_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * returns true if ring is completely cleaned
 **/
static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
{
	struct igbvf_adapter *adapter = tx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct igbvf_buffer *buffer_info;
	struct sk_buff *skb;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i, eop, count = 0;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		for (cleaned = false; !cleaned; count++) {
			tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);
			skb = buffer_info->skb;

			if (skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
				            skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			igbvf_put_txbuf(adapter, buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(count &&
	             netif_carrier_ok(netdev) &&
	             igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__IGBVF_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		adapter->detect_tx_hung = false;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
		               (adapter->tx_timeout_factor * HZ))
		    && !(er32(STATUS) & E1000_STATUS_TXOFF)) {

			tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
			/* detected Tx unit hang */
			igbvf_print_tx_hang(adapter);

			netif_stop_queue(netdev);
		}
	}
	adapter->net_stats.tx_bytes += total_bytes;
	adapter->net_stats.tx_packets += total_packets;
	return (count < tx_ring->count);
}
static irqreturn_t igbvf_msix_other(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	adapter->int_counter1++;

	netif_carrier_off(netdev);
	hw->mac.get_link_status = 1;
	if (!test_bit(__IGBVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer, jiffies + 1);

	ew32(EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

static irqreturn_t igbvf_intr_msix_tx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;

	adapter->total_tx_bytes = 0;
	adapter->total_tx_packets = 0;

	/* auto mask will automatically reenable the interrupt when we write
	 * EICS */
	if (!igbvf_clean_tx_irq(tx_ring))
		/* Ring was not completely cleaned, so fire another interrupt */
		ew32(EICS, tx_ring->eims_value);
	else
		ew32(EIMS, tx_ring->eims_value);

	return IRQ_HANDLED;
}

static irqreturn_t igbvf_intr_msix_rx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	adapter->int_counter0++;

	/* Write the ITR value calculated at the end of the
	 * previous interrupt.
	 */
	if (adapter->rx_ring->set_itr) {
		writel(adapter->rx_ring->itr_val,
		       adapter->hw.hw_addr + adapter->rx_ring->itr_register);
		adapter->rx_ring->set_itr = 0;
	}

	if (napi_schedule_prep(&adapter->rx_ring->napi)) {
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->rx_ring->napi);
	}

	return IRQ_HANDLED;
}
#define IGBVF_NO_QUEUE -1

static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
                                int tx_queue, int msix_vector)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;

	/* 82576 uses a table-based method for assigning vectors.
	   Each queue has a single entry in the table to which we write
	   a vector number along with a "valid" bit.  Sadly, the layout
	   of the table is somewhat counterintuitive. */
	if (rx_queue > IGBVF_NO_QUEUE) {
		index = (rx_queue >> 1);
		ivar = array_er32(IVAR0, index);
		if (rx_queue & 0x1) {
			/* vector goes into third byte of register */
			ivar = ivar & 0xFF00FFFF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
		} else {
			/* vector goes into low byte of register */
			ivar = ivar & 0xFFFFFF00;
			ivar |= msix_vector | E1000_IVAR_VALID;
		}
		adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
		array_ew32(IVAR0, index, ivar);
	}
	if (tx_queue > IGBVF_NO_QUEUE) {
		index = (tx_queue >> 1);
		ivar = array_er32(IVAR0, index);
		if (tx_queue & 0x1) {
			/* vector goes into high byte of register */
			ivar = ivar & 0x00FFFFFF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
		} else {
			/* vector goes into second byte of register */
			ivar = ivar & 0xFFFF00FF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
		}
		adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
		array_ew32(IVAR0, index, ivar);
	}
}
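/* IVAR0 register layout implied above, two queues per 32-bit entry:
 *
 *   bits  7:0   rx queue 2n      bits 15:8   tx queue 2n
 *   bits 23:16  rx queue 2n+1    bits 31:24  tx queue 2n+1
 *
 * Each byte holds the MSI-X vector number for that queue plus
 * E1000_IVAR_VALID; eims_value caches the matching EIMS/EICS bit so the
 * interrupt handlers can mask and trigger their own vector cheaply.
 */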
/**
 * igbvf_configure_msix - Configure MSI-X hardware
 *
 * igbvf_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igbvf_configure_msix(struct igbvf_adapter *adapter)
{
	u32 tmp;
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	int vector = 0;

	adapter->eims_enable_mask = 0;

	igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++);
	adapter->eims_enable_mask |= tx_ring->eims_value;
	if (tx_ring->itr_val)
		writel(tx_ring->itr_val,
		       hw->hw_addr + tx_ring->itr_register);
	else
		writel(1952, hw->hw_addr + tx_ring->itr_register);

	igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++);
	adapter->eims_enable_mask |= rx_ring->eims_value;
	if (rx_ring->itr_val)
		writel(rx_ring->itr_val,
		       hw->hw_addr + rx_ring->itr_register);
	else
		writel(1952, hw->hw_addr + rx_ring->itr_register);

	/* set vector for other causes, i.e. link changes */

	tmp = (vector++ | E1000_IVAR_VALID);

	ew32(IVAR_MISC, tmp);

	adapter->eims_enable_mask = (1 << (vector)) - 1;
	adapter->eims_other = 1 << (vector - 1);
	e1e_flush();
}
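/* With the fixed tx/rx/other vector ordering used above, vector ends up as
 * 3, so eims_enable_mask = (1 << 3) - 1 = 0x7 covers all three vectors and
 * eims_other = 1 << 2 isolates the mailbox/link-change vector for re-arming
 * from igbvf_msix_other().
 */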
static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	}
}

/**
 * igbvf_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
{
	int err = -ENOMEM;
	int i;

	/* we allocate 3 vectors, 1 for tx, 1 for rx, one for pf messages */
	adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry),
	                                GFP_KERNEL);
	if (adapter->msix_entries) {
		for (i = 0; i < 3; i++)
			adapter->msix_entries[i].entry = i;

		err = pci_enable_msix(adapter->pdev,
		                      adapter->msix_entries, 3);
	}

	if (err) {
		/* MSI-X failed */
		dev_err(&adapter->pdev->dev,
		        "Failed to initialize MSI-X interrupts.\n");
		igbvf_reset_interrupt_capability(adapter);
	}
}

/**
 * igbvf_request_msix - Initialize MSI-X interrupts
 *
 * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igbvf_request_msix(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0, vector = 0;

	if (strlen(netdev->name) < (IFNAMSIZ - 5)) {
		sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
		sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
	} else {
		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
		memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
	}

	err = request_irq(adapter->msix_entries[vector].vector,
	                  &igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
	                  netdev);
	if (err)
		goto out;

	adapter->tx_ring->itr_register = E1000_EITR(vector);
	adapter->tx_ring->itr_val = 1952;
	vector++;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  &igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
	                  netdev);
	if (err)
		goto out;

	adapter->rx_ring->itr_register = E1000_EITR(vector);
	adapter->rx_ring->itr_val = 1952;
	vector++;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  &igbvf_msix_other, 0, netdev->name, netdev);
	if (err)
		goto out;

	igbvf_configure_msix(adapter);
	return 0;
out:
	return err;
}
/**
 * igbvf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 **/
static int __devinit igbvf_alloc_queues(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64);

	return 0;
}

/**
 * igbvf_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igbvf_request_irq(struct igbvf_adapter *adapter)
{
	int err = -1;

	/* igbvf supports msi-x only */
	if (adapter->msix_entries)
		err = igbvf_request_msix(adapter);

	if (!err)
		return err;

	dev_err(&adapter->pdev->dev,
	        "Unable to allocate interrupt, Error: %d\n", err);

	return err;
}

static void igbvf_free_irq(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int vector;

	if (adapter->msix_entries) {
		for (vector = 0; vector < 3; vector++)
			free_irq(adapter->msix_entries[vector].vector, netdev);
	}
}
/**
 * igbvf_irq_disable - Mask off interrupt generation on the NIC
 **/
static void igbvf_irq_disable(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(EIMC, ~0);

	if (adapter->msix_entries)
		ew32(EIAC, 0);
}

/**
 * igbvf_irq_enable - Enable default interrupt generation settings
 **/
static void igbvf_irq_enable(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(EIAC, adapter->eims_enable_mask);
	ew32(EIAM, adapter->eims_enable_mask);
	ew32(EIMS, adapter->eims_enable_mask);
}
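/* EIAC/EIAM/EIMS are programmed with the same mask: EIMS arms the vectors,
 * while auto-clear (EIAC) and auto-mask (EIAM) let the hardware acknowledge
 * and temporarily mask a vector as its MSI-X message is sent, so the
 * tx/rx handlers only have to re-set their own EIMS bit when done.
 */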
/**
 * igbvf_poll - NAPI Rx polling callback
 * @napi: struct associated with this polling callback
 * @budget: amount of packets driver is allowed to process this poll
 **/
static int igbvf_poll(struct napi_struct *napi, int budget)
{
	struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring, napi);
	struct igbvf_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int work_done = 0;

	igbvf_clean_rx_irq(adapter, &work_done, budget);

	/* If not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);

		if (adapter->itr_setting & 3)
			igbvf_set_itr(adapter);

		if (!test_bit(__IGBVF_DOWN, &adapter->state))
			ew32(EIMS, adapter->rx_ring->eims_value);
	}

	return work_done;
}

/**
 * igbvf_set_rlpml - set receive large packet maximum length
 * @adapter: board private structure
 *
 * Configure the maximum size of packets that will be received
 **/
static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
{
	int max_frame_size = adapter->max_frame_size;
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->vlgrp)
		max_frame_size += VLAN_TAG_SIZE;

	e1000_rlpml_set_vf(hw, max_frame_size);
}

static void igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (hw->mac.ops.set_vfta(hw, vid, true))
		dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid);
}

static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	igbvf_irq_disable(adapter);
	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IGBVF_DOWN, &adapter->state))
		igbvf_irq_enable(adapter);

	if (hw->mac.ops.set_vfta(hw, vid, false))
		dev_err(&adapter->pdev->dev,
		        "Failed to remove vlan id %d\n", vid);
}

static void igbvf_vlan_rx_register(struct net_device *netdev,
                                   struct vlan_group *grp)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	adapter->vlgrp = grp;
}

static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
{
	u16 vid;

	if (!adapter->vlgrp)
		return;

	for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
		if (!vlan_group_get_device(adapter->vlgrp, vid))
			continue;
		igbvf_vlan_rx_add_vid(adapter->netdev, vid);
	}

	igbvf_set_rlpml(adapter);
}
/**
 * igbvf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igbvf_configure_tx(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	u64 tdba;
	u32 txdctl, dca_txctrl;

	/* disable transmits */
	txdctl = er32(TXDCTL(0));
	ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
	msleep(10);

	/* Setup the HW Tx Head and Tail descriptor pointers */
	ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc));
	tdba = tx_ring->dma;
	ew32(TDBAL(0), (tdba & DMA_32BIT_MASK));
	ew32(TDBAH(0), (tdba >> 32));
	ew32(TDH(0), 0);
	ew32(TDT(0), 0);
	tx_ring->head = E1000_TDH(0);
	tx_ring->tail = E1000_TDT(0);

	/* Turn off Relaxed Ordering on head write-backs.  The writebacks
	 * MUST be delivered in order or it will completely screw up
	 * our bookkeeping.
	 */
	dca_txctrl = er32(DCA_TXCTRL(0));
	dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
	ew32(DCA_TXCTRL(0), dca_txctrl);

	/* enable transmits */
	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
	ew32(TXDCTL(0), txdctl);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS;

	/* enable Report Status bit */
	adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;

	adapter->tx_queue_len = adapter->netdev->tx_queue_len;
}
/**
 * igbvf_setup_srrctl - configure the receive control registers
 * @adapter: Board private structure
 **/
static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 srrctl = 0;

	srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK |
	            E1000_SRRCTL_BSIZEHDR_MASK |
	            E1000_SRRCTL_BSIZEPKT_MASK);

	/* Enable queue drop to avoid head of line blocking */
	srrctl |= E1000_SRRCTL_DROP_EN;

	/* Setup buffer sizes */
	srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >>
	          E1000_SRRCTL_BSIZEPKT_SHIFT;

	if (adapter->rx_buffer_len < 2048) {
		adapter->rx_ps_hdr_size = 0;
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
	} else {
		adapter->rx_ps_hdr_size = 128;
		srrctl |= adapter->rx_ps_hdr_size <<
		          E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	}

	ew32(SRRCTL(0), srrctl);
}
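/* Example of the buffer-size encoding above: with rx_buffer_len = 2048 the
 * packet buffer field is programmed as ALIGN(2048, 1024) >>
 * E1000_SRRCTL_BSIZEPKT_SHIFT; the 1024-byte alignment suggests the field
 * is in 1 KB units, giving a value of 2 here.  Header split with a 128
 * byte header buffer is only enabled for the >= 2048 byte case.
 */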
/**
 * igbvf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void igbvf_configure_rx(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	u64 rdba;
	u32 rdlen, rxdctl;

	/* disable receives */
	rxdctl = er32(RXDCTL(0));
	ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
	msleep(10);

	rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc);

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	rdba = rx_ring->dma;
	ew32(RDBAL(0), (rdba & DMA_32BIT_MASK));
	ew32(RDBAH(0), (rdba >> 32));
	ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc));
	rx_ring->head = E1000_RDH(0);
	rx_ring->tail = E1000_RDT(0);
	ew32(RDH(0), 0);
	ew32(RDT(0), 0);

	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
	rxdctl &= 0xFFF00000;
	rxdctl |= IGBVF_RX_PTHRESH;
	rxdctl |= IGBVF_RX_HTHRESH << 8;
	rxdctl |= IGBVF_RX_WTHRESH << 16;

	igbvf_set_rlpml(adapter);

	/* enable receives */
	ew32(RXDCTL(0), rxdctl);
}

/**
 * igbvf_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void igbvf_set_multi(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct dev_mc_list *mc_ptr;
	u8  *mta_list = NULL;
	int i;

	if (netdev->mc_count) {
		mta_list = kmalloc(netdev->mc_count * 6, GFP_ATOMIC);
		if (!mta_list) {
			dev_err(&adapter->pdev->dev,
			        "failed to allocate multicast filter list\n");
			return;
		}
	}

	/* prepare a packed array of only addresses. */
	mc_ptr = netdev->mc_list;

	for (i = 0; i < netdev->mc_count; i++) {
		if (!mc_ptr)
			break;
		memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr,
		       ETH_ALEN);
		mc_ptr = mc_ptr->next;
	}

	hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
	kfree(mta_list);
}

/**
 * igbvf_configure - configure the hardware for Rx and Tx
 * @adapter: private board structure
 **/
static void igbvf_configure(struct igbvf_adapter *adapter)
{
	igbvf_set_multi(adapter->netdev);

	igbvf_restore_vlan(adapter);

	igbvf_configure_tx(adapter);
	igbvf_setup_srrctl(adapter);
	igbvf_configure_rx(adapter);
	igbvf_alloc_rx_buffers(adapter->rx_ring,
	                       igbvf_desc_unused(adapter->rx_ring));
}

/* igbvf_reset - bring the hardware into a known good state
 *
 * This function boots the hardware and enables some settings that
 * require a configuration cycle of the hardware - those cannot be
 * set/changed during runtime. After reset the device needs to be
 * properly configured for Rx, Tx etc.
 */
static void igbvf_reset(struct igbvf_adapter *adapter)
{
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;

	/* Allow time for pending master requests to run */
	if (mac->ops.reset_hw(hw))
		dev_err(&adapter->pdev->dev, "PF still resetting\n");

	mac->ops.init_hw(hw);

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}
}
int igbvf_up(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	igbvf_configure(adapter);

	clear_bit(__IGBVF_DOWN, &adapter->state);

	napi_enable(&adapter->rx_ring->napi);
	if (adapter->msix_entries)
		igbvf_configure_msix(adapter);

	/* Clear any pending interrupts. */
	er32(EICR);
	igbvf_irq_enable(adapter);

	/* start the watchdog */
	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies + 1);

	return 0;
}

void igbvf_down(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 rxdctl, txdctl;

	/*
	 * signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer
	 */
	set_bit(__IGBVF_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rxdctl = er32(RXDCTL(0));
	ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);

	netif_stop_queue(netdev);

	/* disable transmits in the hardware */
	txdctl = er32(TXDCTL(0));
	ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);

	/* flush both disables and wait for them to finish */
	e1e_flush();
	msleep(10);

	napi_disable(&adapter->rx_ring->napi);

	igbvf_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);

	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);

	/* record the stats before reset*/
	igbvf_update_stats(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	igbvf_reset(adapter);
	igbvf_clean_tx_ring(adapter->tx_ring);
	igbvf_clean_rx_ring(adapter->rx_ring);
}

void igbvf_reinit_locked(struct igbvf_adapter *adapter)
{
	might_sleep();
	while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
		msleep(1);
	igbvf_down(adapter);
	igbvf_up(adapter);
	clear_bit(__IGBVF_RESETTING, &adapter->state);
}

/**
 * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter)
 * @adapter: board private structure to initialize
 *
 * igbvf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit igbvf_sw_init(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	s32 rc;

	adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
	adapter->rx_ps_hdr_size = 0;
	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	adapter->tx_int_delay = 8;
	adapter->tx_abs_int_delay = 32;
	adapter->rx_int_delay = 0;
	adapter->rx_abs_int_delay = 8;
	adapter->itr_setting = 3;
	adapter->itr = 20000;

	/* Set various function pointers */
	adapter->ei->init_ops(&adapter->hw);

	rc = adapter->hw.mac.ops.init_params(&adapter->hw);
	if (rc)
		return rc;

	rc = adapter->hw.mbx.ops.init_params(&adapter->hw);
	if (rc)
		return rc;

	igbvf_set_interrupt_capability(adapter);

	if (igbvf_alloc_queues(adapter))
		return -ENOMEM;

	spin_lock_init(&adapter->tx_queue_lock);

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igbvf_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);

	set_bit(__IGBVF_DOWN, &adapter->state);
	return 0;
}
static void igbvf_initialize_last_counter_stats(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	adapter->stats.last_gprc = er32(VFGPRC);
	adapter->stats.last_gorc = er32(VFGORC);
	adapter->stats.last_gptc = er32(VFGPTC);
	adapter->stats.last_gotc = er32(VFGOTC);
	adapter->stats.last_mprc = er32(VFMPRC);
	adapter->stats.last_gotlbc = er32(VFGOTLBC);
	adapter->stats.last_gptlbc = er32(VFGPTLBC);
	adapter->stats.last_gorlbc = er32(VFGORLBC);
	adapter->stats.last_gprlbc = er32(VFGPRLBC);

	adapter->stats.base_gprc = er32(VFGPRC);
	adapter->stats.base_gorc = er32(VFGORC);
	adapter->stats.base_gptc = er32(VFGPTC);
	adapter->stats.base_gotc = er32(VFGOTC);
	adapter->stats.base_mprc = er32(VFMPRC);
	adapter->stats.base_gotlbc = er32(VFGOTLBC);
	adapter->stats.base_gptlbc = er32(VFGPTLBC);
	adapter->stats.base_gorlbc = er32(VFGORLBC);
	adapter->stats.base_gprlbc = er32(VFGPRLBC);
}

/**
 * igbvf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int igbvf_open(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__IGBVF_TESTING, &adapter->state))
		return -EBUSY;

	/* allocate transmit descriptors */
	err = igbvf_setup_tx_resources(adapter, adapter->tx_ring);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igbvf_setup_rx_resources(adapter, adapter->rx_ring);
	if (err)
		goto err_setup_rx;

	/*
	 * before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.
	 */
	igbvf_configure(adapter);

	err = igbvf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as igbvf_up() */
	clear_bit(__IGBVF_DOWN, &adapter->state);

	napi_enable(&adapter->rx_ring->napi);

	/* clear any pending interrupts */
	er32(EICR);

	igbvf_irq_enable(adapter);

	/* start the watchdog */
	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies + 1);

	return 0;

err_req_irq:
	igbvf_free_rx_resources(adapter->rx_ring);
err_setup_rx:
	igbvf_free_tx_resources(adapter->tx_ring);
err_setup_tx:
	igbvf_reset(adapter);

	return err;
}

/**
 * igbvf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int igbvf_close(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
	igbvf_down(adapter);

	igbvf_free_irq(adapter);

	igbvf_free_tx_resources(adapter->tx_ring);
	igbvf_free_rx_resources(adapter->rx_ring);

	return 0;
}

/**
 * igbvf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int igbvf_set_mac(struct net_device *netdev, void *p)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);

	if (memcmp(addr->sa_data, hw->mac.addr, 6))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return 0;
}
#define UPDATE_VF_COUNTER(reg, name)                                    \
{                                                                       \
	u32 current_counter = er32(reg);                                \
	if (current_counter < adapter->stats.last_##name)               \
		adapter->stats.name += 0x100000000LL;                   \
	adapter->stats.last_##name = current_counter;                   \
	adapter->stats.name &= 0xFFFFFFFF00000000LL;                    \
	adapter->stats.name |= current_counter;                         \
}
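/* The VF statistics registers are 32 bits wide and roll over, so the macro
 * widens them into 64-bit software counters.  Example: if last_gprc was
 * 0xFFFFFFF0 and VFGPRC now reads 0x10, the counter has wrapped
 * (0x10 < 0xFFFFFFF0), so 2^32 is added to the upper half and the low
 * 32 bits are replaced with the current reading.
 */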
/**
 * igbvf_update_stats - Update the board statistics counters
 * @adapter: board private structure
**/
void igbvf_update_stats(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;

	/*
	 * Prevent stats update while adapter is being reset, link is down
	 * or if the pci connection is down.
	 */
	if (adapter->link_speed == 0)
		return;

	if (test_bit(__IGBVF_RESETTING, &adapter->state))
		return;

	if (pci_channel_offline(pdev))
		return;

	UPDATE_VF_COUNTER(VFGPRC, gprc);
	UPDATE_VF_COUNTER(VFGORC, gorc);
	UPDATE_VF_COUNTER(VFGPTC, gptc);
	UPDATE_VF_COUNTER(VFGOTC, gotc);
	UPDATE_VF_COUNTER(VFMPRC, mprc);
	UPDATE_VF_COUNTER(VFGOTLBC, gotlbc);
	UPDATE_VF_COUNTER(VFGPTLBC, gptlbc);
	UPDATE_VF_COUNTER(VFGORLBC, gorlbc);
	UPDATE_VF_COUNTER(VFGPRLBC, gprlbc);

	/* Fill out the OS statistics structure */
	adapter->net_stats.multicast = adapter->stats.mprc;
}

static void igbvf_print_link_info(struct igbvf_adapter *adapter)
{
	dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s\n",
	         adapter->link_speed,
	         ((adapter->link_duplex == FULL_DUPLEX) ?
	          "Full Duplex" : "Half Duplex"));
}

static bool igbvf_has_link(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	s32 ret_val = E1000_SUCCESS;
	bool link_active;

	ret_val = hw->mac.ops.check_for_link(hw);
	link_active = !hw->mac.get_link_status;

	/* if check for link returns error we will need to reset */
	if (ret_val)
		schedule_work(&adapter->reset_task);

	return link_active;
}

/**
 * igbvf_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void igbvf_watchdog(unsigned long data)
{
	struct igbvf_adapter *adapter = (struct igbvf_adapter *) data;

	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}

static void igbvf_watchdog_task(struct work_struct *work)
{
	struct igbvf_adapter *adapter = container_of(work,
	                                             struct igbvf_adapter,
	                                             watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	struct e1000_hw *hw = &adapter->hw;
	u32 link;
	int tx_pending = 0;

	link = igbvf_has_link(adapter);

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			mac->ops.get_link_up_info(&adapter->hw,
			                          &adapter->link_speed,
			                          &adapter->link_duplex);
			igbvf_print_link_info(adapter);

			/*
			 * tweak tx_queue_len according to speed/duplex
			 * and adjust the timeout factor
			 */
			netdev->tx_queue_len = adapter->tx_queue_len;
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				netdev->tx_queue_len = 10;
				adapter->tx_timeout_factor = 16;
				break;
			case SPEED_100:
				netdev->tx_queue_len = 100;
				/* maybe add some timeout factor ? */
				break;
			}

			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			dev_info(&adapter->pdev->dev, "Link is Down\n");
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
	}

	if (netif_carrier_ok(netdev)) {
		igbvf_update_stats(adapter);
	} else {
		tx_pending = (igbvf_desc_unused(tx_ring) + 1 <
		              tx_ring->count);
		if (tx_pending) {
			/*
			 * We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
		}
	}

	/* Cause software interrupt to ensure Rx ring is cleaned */
	ew32(EICS, adapter->rx_ring->eims_value);

	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = 1;

	/* Reset the timer */
	if (!test_bit(__IGBVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
		          round_jiffies(jiffies + (2 * HZ)));
}
#define IGBVF_TX_FLAGS_CSUM             0x00000001
#define IGBVF_TX_FLAGS_VLAN             0x00000002
#define IGBVF_TX_FLAGS_TSO              0x00000004
#define IGBVF_TX_FLAGS_IPV4             0x00000008
#define IGBVF_TX_FLAGS_VLAN_MASK        0xffff0000
#define IGBVF_TX_FLAGS_VLAN_SHIFT       16
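/* The low bits of tx_flags are booleans; the upper 16 bits carry the 802.1Q
 * tag itself.  igbvf_xmit_frame_ring_adv() shifts the tag in with
 * IGBVF_TX_FLAGS_VLAN_SHIFT, and IGBVF_TX_FLAGS_VLAN_MASK recovers it when
 * the context descriptor is built.
 */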
static int igbvf_tso(struct igbvf_adapter *adapter,
                     struct igbvf_ring *tx_ring,
                     struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	struct e1000_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct igbvf_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	u32 mss_l4len_idx, l4len;

	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err) {
			dev_err(&adapter->pdev->dev,
			        "igbvf_tso returning an error\n");
			return err;
		}
	}

	l4len = tcp_hdrlen(skb);
	*hdr_len += l4len;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
		                                         iph->daddr, 0,
		                                         IPPROTO_TCP, 0);
	} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
		                                       &ipv6_hdr(skb)->daddr,
		                                       0, IPPROTO_TCP, 0);
	}

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
	/* VLAN MACLEN IPLEN */
	if (tx_flags & IGBVF_TX_FLAGS_VLAN)
		info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
	info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
	*hdr_len += skb_network_offset(skb);
	info |= (skb_transport_header(skb) - skb_network_header(skb));
	*hdr_len += (skb_transport_header(skb) - skb_network_header(skb));
	context_desc->vlan_macip_lens = cpu_to_le32(info);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

	if (skb->protocol == htons(ETH_P_IP))
		tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
	tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;

	context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);

	/* MSS L4LEN IDX */
	mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);

	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
	context_desc->seqnum_seed = 0;

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = 0;
	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	return true;
}

static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
                                 struct igbvf_ring *tx_ring,
                                 struct sk_buff *skb, u32 tx_flags)
{
	struct e1000_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct igbvf_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IGBVF_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		buffer_info = &tx_ring->buffer_info[i];
		context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IGBVF_TX_FLAGS_VLAN)
			info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);

		info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			info |= (skb_transport_header(skb) -
			         skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(info);

		tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			switch (skb->protocol) {
			case __constant_htons(ETH_P_IP):
				tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				break;
			case __constant_htons(ETH_P_IPV6):
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				break;
			default:
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
		context_desc->seqnum_seed = 0;
		context_desc->mss_l4len_idx = 0;

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = 0;
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}

static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* if there are enough descriptors then we don't need to worry */
	if (igbvf_desc_unused(adapter->tx_ring) >= size)
		return 0;

	netif_stop_queue(netdev);

	smp_mb();

	/* We need to check again just in case room has been made available */
	if (igbvf_desc_unused(adapter->tx_ring) < size)
		return -EBUSY;

	netif_wake_queue(netdev);

	++adapter->restart_queue;
	return 0;
}
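/* The smp_mb() above pairs with the one in igbvf_clean_tx_irq(): the queue
 * is stopped first, then the free-descriptor count is re-read, so either
 * this check sees the space the cleanup path just created or the cleanup
 * path sees the stopped queue and wakes it; the queue should not stall
 * with room available.
 */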
#define IGBVF_MAX_TXD_PWR       16
#define IGBVF_MAX_DATA_PER_TXD  (1 << IGBVF_MAX_TXD_PWR)

static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
                                   struct igbvf_ring *tx_ring,
                                   struct sk_buff *skb,
                                   unsigned int first)
{
	struct igbvf_buffer *buffer_info;
	unsigned int len = skb_headlen(skb);
	unsigned int count = 0, i;
	unsigned int f;
	dma_addr_t *map;

	i = tx_ring->next_to_use;

	if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
		return 0;
	}

	map = skb_shinfo(skb)->dma_maps;

	buffer_info = &tx_ring->buffer_info[i];
	BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
	buffer_info->length = len;
	/* set time_stamp *before* dma to help avoid a possible race */
	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = map[count];
	count++;

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		struct skb_frag_struct *frag;

		i++;
		if (i == tx_ring->count)
			i = 0;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;

		buffer_info = &tx_ring->buffer_info[i];
		BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
		buffer_info->length = len;
		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = map[count];
		count++;
	}

	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;
}
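/* count is incremented once for the skb_headlen() segment and once per page
 * fragment, so a linear skb with two frags yields 3, the number of data
 * descriptors consumed.  A return of 0 signals that skb_dma_map() failed
 * and the caller must rewind next_to_use instead of transmitting.
 */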
static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
                                      struct igbvf_ring *tx_ring,
                                      int tx_flags, int count, u32 paylen,
                                      u8 hdr_len)
{
	union e1000_adv_tx_desc *tx_desc = NULL;
	struct igbvf_buffer *buffer_info;
	u32 olinfo_status = 0, cmd_type_len;
	unsigned int i;

	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
	                E1000_ADVTXD_DCMD_DEXT);

	if (tx_flags & IGBVF_TX_FLAGS_VLAN)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	if (tx_flags & IGBVF_TX_FLAGS_TSO) {
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;

		/* insert tcp checksum */
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert ip checksum */
		if (tx_flags & IGBVF_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
	} else if (tx_flags & IGBVF_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	}

	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems */
	mmiowb();
}
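
/*
 * Hot-path ordering for a single transmit: sanity checks, ring-space check,
 * optional TSO/checksum context descriptor, DMA mapping, then the data
 * descriptors and the tail bump.  A mapping failure rewinds next_to_use to
 * 'first' so any context descriptor written earlier is overwritten by the
 * next frame rather than handed to the hardware.
 */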
static int igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
                                     struct net_device *netdev,
                                     struct igbvf_ring *tx_ring)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	unsigned int first, tx_flags = 0;
	u8 hdr_len = 0;
	int count = 0;
	int tso = 0;

	if (test_bit(__IGBVF_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/*
	 * need: nr_frags + 4 descriptors
	 *       (+ 2 desc gap to keep tail from touching head,
	 *        + 1 desc for skb->data,
	 *        + 1 desc for a context descriptor),
	 * otherwise try again next time
	 */
	if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= IGBVF_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT);
	}

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IGBVF_TX_FLAGS_IPV4;

	first = tx_ring->next_to_use;

	tso = skb_is_gso(skb) ?
		igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0;
	if (unlikely(tso < 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IGBVF_TX_FLAGS_TSO;
	else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
	         (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IGBVF_TX_FLAGS_CSUM;

	/*
	 * count reflects descriptors mapped, if 0 then mapping error
	 * has occurred and we need to rewind the descriptor queue
	 */
	count = igbvf_tx_map_adv(adapter, tx_ring, skb, first);

	if (count) {
		igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
		                   skb->len, hdr_len);
		netdev->trans_start = jiffies;
		/* Make sure there is space in the ring for the next send. */
		igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
	} else {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}
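
/*
 * The VF exposes a single transmit queue, so the ndo_start_xmit hook just
 * forwards every frame to tx_ring[0].
 */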
static int igbvf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct igbvf_ring *tx_ring;
	int retval;

	if (test_bit(__IGBVF_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	tx_ring = &adapter->tx_ring[0];

	retval = igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring);

	return retval;
}

/**
 * igbvf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igbvf_tx_timeout(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}

static void igbvf_reset_task(struct work_struct *work)
{
	struct igbvf_adapter *adapter;
	adapter = container_of(work, struct igbvf_adapter, reset_task);

	igbvf_reinit_locked(adapter);
}

/**
 * igbvf_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *igbvf_get_stats(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* only return the current stats */
	return &adapter->net_stats;
}

/**
 * igbvf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

	/* Jumbo frame size limits */
	if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
		if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
			dev_err(&adapter->pdev->dev,
			        "Jumbo Frames not supported.\n");
			return -EINVAL;
		}
	}

#define MAX_STD_JUMBO_FRAME_SIZE 9234
	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
		return -EINVAL;
	}

	while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
		msleep(1);
	/* igbvf_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;
	if (netif_running(netdev))
		igbvf_down(adapter);

	/*
	 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 * However with the new *_jumbo_rx* routines, jumbo receives will use
	 * fragmented skbs.
	 */
	if (max_frame <= 1024)
		adapter->rx_buffer_len = 1024;
	else if (max_frame <= 2048)
		adapter->rx_buffer_len = 2048;
	else
#if (PAGE_SIZE / 2) > 16384
		adapter->rx_buffer_len = 16384;
#else
		adapter->rx_buffer_len = PAGE_SIZE / 2;
#endif

	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
	    (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
		adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN +
		                         ETH_FCS_LEN;

	dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
	         netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		igbvf_up(adapter);
	else
		igbvf_reset(adapter);

	clear_bit(__IGBVF_RESETTING, &adapter->state);

	return 0;
}
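
/*
 * No private ioctls are supported: the 82576 VF exposes no PHY or MII
 * registers to manage, so every command is rejected.
 */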
static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	default:
		return -EOPNOTSUPP;
	}
}

static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
		igbvf_down(adapter);
		igbvf_free_irq(adapter);
	}

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	pci_disable_device(pdev);

	return 0;
}

static int igbvf_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_restore_state(pdev);
	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
		return err;
	}

	pci_set_master(pdev);

	if (netif_running(netdev)) {
		err = igbvf_request_irq(adapter);
		if (err)
			return err;
	}

	igbvf_reset(adapter);

	if (netif_running(netdev))
		igbvf_up(adapter);

	netif_device_attach(netdev);

	return 0;
}

static void igbvf_shutdown(struct pci_dev *pdev)
{
	igbvf_suspend(pdev, PMSG_SUSPEND);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igbvf_netpoll(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);

	igbvf_clean_tx_irq(adapter->tx_ring);

	enable_irq(adapter->pdev->irq);
}
#endif

/**
 * igbvf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev))
		igbvf_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igbvf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first half of the igbvf_resume routine.
 **/
static pci_ers_result_t igbvf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
		        "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	igbvf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * igbvf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the igbvf_resume routine.
 **/
static void igbvf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igbvf_up(adapter)) {
			dev_err(&pdev->dev,
			        "can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}

static void igbvf_print_device_info(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
	dev_info(&pdev->dev, "Address: %02x:%02x:%02x:%02x:%02x:%02x\n",
	         netdev->dev_addr[0], netdev->dev_addr[1],
	         netdev->dev_addr[2], netdev->dev_addr[3],
	         netdev->dev_addr[4], netdev->dev_addr[5]);
	dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
}

static const struct net_device_ops igbvf_netdev_ops = {
	.ndo_open               = igbvf_open,
	.ndo_stop               = igbvf_close,
	.ndo_start_xmit         = igbvf_xmit_frame,
	.ndo_get_stats          = igbvf_get_stats,
	.ndo_set_multicast_list = igbvf_set_multi,
	.ndo_set_mac_address    = igbvf_set_mac,
	.ndo_change_mtu         = igbvf_change_mtu,
	.ndo_do_ioctl           = igbvf_ioctl,
	.ndo_tx_timeout         = igbvf_tx_timeout,
	.ndo_vlan_rx_register   = igbvf_vlan_rx_register,
	.ndo_vlan_rx_add_vid    = igbvf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid   = igbvf_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = igbvf_netpoll,
#endif
};

/**
 * igbvf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igbvf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igbvf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igbvf_probe(struct pci_dev *pdev,
                                 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igbvf_adapter *adapter;
	struct e1000_hw *hw;
	const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data];

	static int cards_found;
	int err, pci_using_dac;
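
	/*
	 * Enable the device, then try for a 64-bit DMA mask and fall back
	 * to 32-bit if the platform can't do DAC; pci_using_dac records
	 * the outcome so NETIF_F_HIGHDMA can be set later.
	 */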
	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (!err)
			pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			err = pci_set_consistent_dma_mask(pdev,
			                                  DMA_32BIT_MASK);
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
				        "configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_regions(pdev, igbvf_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct igbvf_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	hw = &adapter->hw;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->ei = ei;
	adapter->pba = ei->pba;
	adapter->flags = ei->flags;
	adapter->hw.back = adapter;
	adapter->hw.mac.type = ei->mac;
	adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);

	err = -EIO;
	adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
	                              pci_resource_len(pdev, 0));

	if (!adapter->hw.hw_addr)
		goto err_ioremap;

	if (ei->get_variants) {
		err = ei->get_variants(adapter);
		if (err)
			goto err_ioremap;
	}

	/* setup adapter struct */
	err = igbvf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* construct the net_device struct */
	netdev->netdev_ops = &igbvf_netdev_ops;

	igbvf_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found++;

	netdev->features = NETIF_F_SG |
	                   NETIF_F_IP_CSUM |
	                   NETIF_F_HW_VLAN_TX |
	                   NETIF_F_HW_VLAN_RX |
	                   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;
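
	/*
	 * The reset_hw mailbox call can fail while the PF is itself still
	 * resetting; in that case the VF assigns itself a random MAC
	 * address instead of reading one from the hardware, and continues
	 * probing.
	 */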
	/* reset the controller to put the device in a known good state */
	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_info(&pdev->dev,
		         "PF still in reset state, assigning new address\n");
		random_ether_addr(hw->mac.addr);
	} else {
		err = hw->mac.ops.read_mac_addr(hw);
		if (err) {
			dev_err(&pdev->dev, "Error reading MAC address\n");
			goto err_hw_init;
		}
	}

	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address: "
		        "%02x:%02x:%02x:%02x:%02x:%02x\n",
		        netdev->dev_addr[0], netdev->dev_addr[1],
		        netdev->dev_addr[2], netdev->dev_addr[3],
		        netdev->dev_addr[4], netdev->dev_addr[5]);
		err = -EIO;
		goto err_hw_init;
	}

	setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
	            (unsigned long) adapter);

	INIT_WORK(&adapter->reset_task, igbvf_reset_task);
	INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task);

	/* ring size defaults */
	adapter->rx_ring->count = 1024;
	adapter->tx_ring->count = 1024;

	/* reset the hardware with the new settings */
	igbvf_reset(adapter);

	/* tell the stack to leave us alone until igbvf_open() is called */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_hw_init;

	igbvf_print_device_info(adapter);

	igbvf_initialize_last_counter_stats(adapter);

	return 0;

err_hw_init:
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_sw_init:
	igbvf_reset_interrupt_capability(adapter);
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * igbvf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igbvf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igbvf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * flush_scheduled_work may reschedule our watchdog task, so
	 * explicitly disable watchdog tasks from being rescheduled
	 */
	set_bit(__IGBVF_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);

	flush_scheduled_work();

	unregister_netdev(netdev);

	igbvf_reset_interrupt_capability(adapter);

	/*
	 * it is important to delete the napi struct prior to freeing the
	 * rx ring so that you do not end up with null pointer refs
	 */
	netif_napi_del(&adapter->rx_ring->napi);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_regions(pdev);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/* PCI Error Recovery (ERS) */
static struct pci_error_handlers igbvf_err_handler = {
	.error_detected = igbvf_io_error_detected,
	.slot_reset     = igbvf_io_slot_reset,
	.resume         = igbvf_io_resume,
};

static struct pci_device_id igbvf_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
	{ } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);

/* PCI Device API Driver */
static struct pci_driver igbvf_driver = {
	.name        = igbvf_driver_name,
	.id_table    = igbvf_pci_tbl,
	.probe       = igbvf_probe,
	.remove      = __devexit_p(igbvf_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend     = igbvf_suspend,
	.resume      = igbvf_resume,
#endif
	.shutdown    = igbvf_shutdown,
	.err_handler = &igbvf_err_handler
};

/**
 * igbvf_init_module - Driver Registration Routine
 *
 * igbvf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igbvf_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       igbvf_driver_string, igbvf_driver_version);
	printk(KERN_INFO "%s\n", igbvf_copyright);

	ret = pci_register_driver(&igbvf_driver);
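
	/*
	 * Register a CPU DMA latency requirement with the PM QoS framework
	 * at the default (unconstrained) value.  The entry could later be
	 * tightened for low-latency operation, though this driver appears
	 * only to reserve it here and drop it again at module exit.
	 */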
	pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, igbvf_driver_name,
	                       PM_QOS_DEFAULT_VALUE);

	return ret;
}
module_init(igbvf_init_module);

/**
 * igbvf_exit_module - Driver Exit Cleanup Routine
 *
 * igbvf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igbvf_exit_module(void)
{
	pci_unregister_driver(&igbvf_driver);
	pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, igbvf_driver_name);
}
module_exit(igbvf_exit_module);

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) 82576 Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);