/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2008 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos_params.h>
#include <linux/aer.h>

#include "e1000.h"
#define DRV_VERSION "0.3.3.4-k2"
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
static const struct e1000_info *e1000_info_tbl[] = {
	[board_82571]		= &e1000_82571_info,
	[board_82572]		= &e1000_82572_info,
	[board_82573]		= &e1000_82573_info,
	[board_82574]		= &e1000_82574_info,
	[board_82583]		= &e1000_82583_info,
	[board_80003es2lan]	= &e1000_es2_info,
	[board_ich8lan]		= &e1000_ich8_info,
	[board_ich9lan]		= &e1000_ich9_info,
	[board_ich10lan]	= &e1000_ich10_info,
};
/**
 * e1000_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *e1000e_get_hw_dev_name(struct e1000_hw *hw)
{
	return hw->adapter->netdev->name;
}
/**
 * e1000_desc_unused - calculate if we have unused descriptors
 **/
static int e1000_desc_unused(struct e1000_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
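/*
 * Example: with ring->count = 256, next_to_clean = 10 and next_to_use = 200,
 * e1000_desc_unused() returns 256 + 10 - 200 - 1 = 65.  The "- 1" keeps one
 * descriptor permanently unused so that next_to_use never catches up with
 * next_to_clean while the ring still holds unprocessed entries.
 */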
/**
 * e1000_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to stack
 **/
static void e1000_receive_skb(struct e1000_adapter *adapter,
			      struct net_device *netdev,
			      struct sk_buff *skb,
			      u8 status, __le16 vlan)
{
	skb->protocol = eth_type_trans(skb, netdev);

	if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
		vlan_gro_receive(&adapter->napi, adapter->vlgrp,
				 le16_to_cpu(vlan), skb);
	else
		napi_gro_receive(&adapter->napi, skb);
}
/**
 * e1000_rx_checksum - Receive Checksum Offload for 82543
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @csum: receive descriptor csum field
 * @sk_buff: socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      u32 csum, struct sk_buff *skb)
{
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);
	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set */
	if (status & E1000_RXD_STAT_IXSM)
		return;

	/* TCP/UDP checksum error bit is set */
	if (errors & E1000_RXD_ERR_TCPE) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* TCP/UDP Checksum has not been calculated */
	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	if (status & E1000_RXD_STAT_TCPCS) {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		/*
		 * IP fragment with UDP payload
		 * Hardware complements the payload checksum, so we undo it
		 * and then put the value in host order for further stack use.
		 */
		__sum16 sum = (__force __sum16)htons(csum);
		skb->csum = csum_unfold(~sum);
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
	adapter->hw_csum_good++;
}
/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 **/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb(netdev, bufsz);
		if (!skb) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/*
		 * Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->skb = skb;
map_skb:
		buffer_info->dma = pci_map_single(pdev, skb->data,
						  adapter->rx_buffer_len,
						  PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
			dev_err(&pdev->dev, "RX DMA map failed\n");
			adapter->rx_dma_failed++;
			break;
		}

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i-- == 0)
			i = (rx_ring->count - 1);

		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
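/*
 * Note on the tail update above: after next_to_use is recorded, i is wound
 * back by one (wrapping to count - 1 at zero) before being written to the
 * tail register, so hardware is told the index of the last descriptor that
 * was actually initialised rather than the next free slot.
 */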
/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
				      int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_packet_split *rx_desc;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (j >= adapter->rx_ps_pages) {
				/* all unused desc entries get hw null ptr */
				rx_desc->read.buffer_addr[j+1] = ~cpu_to_le64(0);
				continue;
			}
			if (!ps_page->page) {
				ps_page->page = alloc_page(GFP_ATOMIC);
				if (!ps_page->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				ps_page->dma = pci_map_page(pdev,
							    ps_page->page,
							    0, PAGE_SIZE,
							    PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(pdev, ps_page->dma)) {
					dev_err(&adapter->pdev->dev,
						"RX DMA page map failed\n");
					adapter->rx_dma_failed++;
					goto no_buffers;
				}
			}
			/*
			 * Refresh the desc even if buffer_addrs
			 * didn't change because each write-back
			 * erases this info.
			 */
			rx_desc->read.buffer_addr[j+1] =
				cpu_to_le64(ps_page->dma);
		}

		skb = netdev_alloc_skb(netdev,
				       adapter->rx_ps_bsize0 + NET_IP_ALIGN);
		if (!skb) {
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/*
		 * Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->skb = skb;
		buffer_info->dma = pci_map_single(pdev, skb->data,
						  adapter->rx_ps_bsize0,
						  PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
			dev_err(&pdev->dev, "RX DMA map failed\n");
			adapter->rx_dma_failed++;
			/* cleanup skb */
			dev_kfree_skb_any(skb);
			buffer_info->skb = NULL;
			break;
		}

		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i-- == 0)
			i = (rx_ring->count - 1);

		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		/*
		 * Hardware increments by 16 bytes, but packet split
		 * descriptors are 32 bytes...so we increment tail
		 * twice as much.
		 */
		writel(i << 1, adapter->hw.hw_addr + rx_ring->tail);
	}
}
/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @adapter: address of board private structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = 256 -
			     16 /* for skb_reserve */ -
			     NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = netdev_alloc_skb(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->skb = skb;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(GFP_ATOMIC);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma)
			buffer_info->dma = pci_map_page(pdev,
							buffer_info->page, 0,
							PAGE_SIZE,
							PCI_DMA_FROMDEVICE);

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = 1;
		cleaned_count++;
		pci_unmap_single(pdev,
				 buffer_info->dma,
				 adapter->rx_buffer_len,
				 PCI_DMA_FROMDEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/* !EOP means multiple descriptors were used to store a single
		 * packet, also make sure the frame isn't just CRC only */
		if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
			/* All receives must fit into a single buffer */
			e_dbg("%s: Receive packet consumed multiple buffers\n",
			      netdev->name);
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		/* adjust length to remove Ethernet CRC */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
			length -= 4;

		total_rx_bytes += length;
		total_rx_packets++;

		/*
		 * code added for copybreak, this should improve
		 * performance for small packets with large amounts
		 * of reassembly being done in the stack
		 */
		if (length < copybreak) {
			struct sk_buff *new_skb =
			    netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
			if (new_skb) {
				skb_reserve(new_skb, NET_IP_ALIGN);
				skb_copy_to_linear_data_offset(new_skb,
							       -NET_IP_ALIGN,
							       skb->data -
								NET_IP_ALIGN,
							       length +
								NET_IP_ALIGN);
				/* save the skb in buffer_info as good */
				buffer_info->skb = skb;
				skb = new_skb;
			}
			/* else just continue with the old one */
		}
		/* end copybreak code */
		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		e1000_receive_skb(adapter, netdev, skb, status,
				  rx_desc->special);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;
	return cleaned;
}
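/*
 * The copybreak path in e1000_clean_rx_irq() above trades a small memcpy for
 * buffer reuse: frames shorter than the copybreak module parameter are copied
 * into a freshly allocated skb while the original full-sized receive buffer
 * is handed back to the ring, so the allocator does not have to build a new
 * large skb for every small packet.
 */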
static void e1000_put_txbuf(struct e1000_adapter *adapter,
			    struct e1000_buffer *buffer_info)
{
	buffer_info->dma = 0;
	if (buffer_info->skb) {
		skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
			      DMA_TO_DEVICE);
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
}
static void e1000_print_tx_hang(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int i = tx_ring->next_to_clean;
	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);

	/* detected Tx unit hang */
	e_err("Detected Tx Unit Hang:\n"
	      "  TDH                  <%x>\n"
	      "  TDT                  <%x>\n"
	      "  next_to_use          <%x>\n"
	      "  next_to_clean        <%x>\n"
	      "buffer_info[next_to_clean]:\n"
	      "  time_stamp           <%lx>\n"
	      "  next_to_watch        <%x>\n"
	      "  jiffies              <%lx>\n"
	      "  next_to_watch.status <%x>\n",
	      readl(adapter->hw.hw_addr + tx_ring->head),
	      readl(adapter->hw.hw_addr + tx_ring->tail),
	      tx_ring->next_to_use,
	      tx_ring->next_to_clean,
	      tx_ring->buffer_info[eop].time_stamp,
	      eop,
	      jiffies,
	      eop_desc->upper.fields.status);
}
/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	bool cleaned = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
		for (cleaned = 0; !cleaned; ) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				struct sk_buff *skb = buffer_info->skb;
				unsigned int segs, bytecount;
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_tx_packets += segs;
				total_tx_bytes += bytecount;
			}

			e1000_put_txbuf(adapter, buffer_info);
			tx_desc->upper.data = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
#define E1000_TX_WEIGHT 64
		/* weight of a sort for tx, to avoid endless transmit cleanup */
		if (count++ == E1000_TX_WEIGHT)
			break;
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD 32
	if (cleaned && netif_carrier_ok(netdev) &&
	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		adapter->detect_tx_hung = 0;
		if (tx_ring->buffer_info[eop].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp
			       + (adapter->tx_timeout_factor * HZ))
		    && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
			e1000_print_tx_hang(adapter);
			netif_stop_queue(netdev);
		}
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	adapter->net_stats.tx_bytes += total_tx_bytes;
	adapter->net_stats.tx_packets += total_tx_packets;
	return cleaned;
}
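/*
 * Two throttles bound the work done in e1000_clean_tx_irq() above:
 * E1000_TX_WEIGHT caps how many descriptors one invocation will reclaim so
 * transmit cleanup cannot monopolise the CPU, and TX_WAKE_THRESHOLD only
 * restarts a stopped queue once at least 32 descriptors are free again,
 * providing hysteresis against stopping the queue right back.
 */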
701 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
702 * @adapter: board private structure
704 * the return value indicates whether actual cleaning was done, there
705 * is no guarantee that everything was cleaned
707 static bool e1000_clean_rx_irq_ps(struct e1000_adapter
*adapter
,
708 int *work_done
, int work_to_do
)
710 union e1000_rx_desc_packet_split
*rx_desc
, *next_rxd
;
711 struct net_device
*netdev
= adapter
->netdev
;
712 struct pci_dev
*pdev
= adapter
->pdev
;
713 struct e1000_ring
*rx_ring
= adapter
->rx_ring
;
714 struct e1000_buffer
*buffer_info
, *next_buffer
;
715 struct e1000_ps_page
*ps_page
;
719 int cleaned_count
= 0;
721 unsigned int total_rx_bytes
= 0, total_rx_packets
= 0;
723 i
= rx_ring
->next_to_clean
;
724 rx_desc
= E1000_RX_DESC_PS(*rx_ring
, i
);
725 staterr
= le32_to_cpu(rx_desc
->wb
.middle
.status_error
);
726 buffer_info
= &rx_ring
->buffer_info
[i
];
728 while (staterr
& E1000_RXD_STAT_DD
) {
729 if (*work_done
>= work_to_do
)
732 skb
= buffer_info
->skb
;
734 /* in the packet split case this is header only */
735 prefetch(skb
->data
- NET_IP_ALIGN
);
738 if (i
== rx_ring
->count
)
740 next_rxd
= E1000_RX_DESC_PS(*rx_ring
, i
);
743 next_buffer
= &rx_ring
->buffer_info
[i
];
747 pci_unmap_single(pdev
, buffer_info
->dma
,
748 adapter
->rx_ps_bsize0
,
750 buffer_info
->dma
= 0;
752 if (!(staterr
& E1000_RXD_STAT_EOP
)) {
753 e_dbg("%s: Packet Split buffers didn't pick up the "
754 "full packet\n", netdev
->name
);
755 dev_kfree_skb_irq(skb
);
759 if (staterr
& E1000_RXDEXT_ERR_FRAME_ERR_MASK
) {
760 dev_kfree_skb_irq(skb
);
764 length
= le16_to_cpu(rx_desc
->wb
.middle
.length0
);
767 e_dbg("%s: Last part of the packet spanning multiple "
768 "descriptors\n", netdev
->name
);
769 dev_kfree_skb_irq(skb
);
774 skb_put(skb
, length
);
778 * this looks ugly, but it seems compiler issues make it
779 * more efficient than reusing j
781 int l1
= le16_to_cpu(rx_desc
->wb
.upper
.length
[0]);
784 * page alloc/put takes too long and effects small packet
785 * throughput, so unsplit small packets and save the alloc/put
786 * only valid in softirq (napi) context to call kmap_*
788 if (l1
&& (l1
<= copybreak
) &&
789 ((length
+ l1
) <= adapter
->rx_ps_bsize0
)) {
792 ps_page
= &buffer_info
->ps_pages
[0];
795 * there is no documentation about how to call
796 * kmap_atomic, so we can't hold the mapping
799 pci_dma_sync_single_for_cpu(pdev
, ps_page
->dma
,
800 PAGE_SIZE
, PCI_DMA_FROMDEVICE
);
801 vaddr
= kmap_atomic(ps_page
->page
, KM_SKB_DATA_SOFTIRQ
);
802 memcpy(skb_tail_pointer(skb
), vaddr
, l1
);
803 kunmap_atomic(vaddr
, KM_SKB_DATA_SOFTIRQ
);
804 pci_dma_sync_single_for_device(pdev
, ps_page
->dma
,
805 PAGE_SIZE
, PCI_DMA_FROMDEVICE
);
808 if (!(adapter
->flags2
& FLAG2_CRC_STRIPPING
))
816 for (j
= 0; j
< PS_PAGE_BUFFERS
; j
++) {
817 length
= le16_to_cpu(rx_desc
->wb
.upper
.length
[j
]);
821 ps_page
= &buffer_info
->ps_pages
[j
];
822 pci_unmap_page(pdev
, ps_page
->dma
, PAGE_SIZE
,
825 skb_fill_page_desc(skb
, j
, ps_page
->page
, 0, length
);
826 ps_page
->page
= NULL
;
828 skb
->data_len
+= length
;
829 skb
->truesize
+= length
;
832 /* strip the ethernet crc, problem is we're using pages now so
833 * this whole operation can get a little cpu intensive
835 if (!(adapter
->flags2
& FLAG2_CRC_STRIPPING
))
836 pskb_trim(skb
, skb
->len
- 4);
839 total_rx_bytes
+= skb
->len
;
842 e1000_rx_checksum(adapter
, staterr
, le16_to_cpu(
843 rx_desc
->wb
.lower
.hi_dword
.csum_ip
.csum
), skb
);
845 if (rx_desc
->wb
.upper
.header_status
&
846 cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP
))
847 adapter
->rx_hdr_split
++;
849 e1000_receive_skb(adapter
, netdev
, skb
,
850 staterr
, rx_desc
->wb
.middle
.vlan
);
853 rx_desc
->wb
.middle
.status_error
&= cpu_to_le32(~0xFF);
854 buffer_info
->skb
= NULL
;
856 /* return some buffers to hardware, one at a time is too slow */
857 if (cleaned_count
>= E1000_RX_BUFFER_WRITE
) {
858 adapter
->alloc_rx_buf(adapter
, cleaned_count
);
862 /* use prefetched values */
864 buffer_info
= next_buffer
;
866 staterr
= le32_to_cpu(rx_desc
->wb
.middle
.status_error
);
868 rx_ring
->next_to_clean
= i
;
870 cleaned_count
= e1000_desc_unused(rx_ring
);
872 adapter
->alloc_rx_buf(adapter
, cleaned_count
);
874 adapter
->total_rx_bytes
+= total_rx_bytes
;
875 adapter
->total_rx_packets
+= total_rx_packets
;
876 adapter
->net_stats
.rx_bytes
+= total_rx_bytes
;
877 adapter
->net_stats
.rx_packets
+= total_rx_packets
;
/**
 * e1000_consume_page - helper function
 **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
			       u16 length)
{
	bi->page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
}
894 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
895 * @adapter: board private structure
897 * the return value indicates whether actual cleaning was done, there
898 * is no guarantee that everything was cleaned
901 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter
*adapter
,
902 int *work_done
, int work_to_do
)
904 struct net_device
*netdev
= adapter
->netdev
;
905 struct pci_dev
*pdev
= adapter
->pdev
;
906 struct e1000_ring
*rx_ring
= adapter
->rx_ring
;
907 struct e1000_rx_desc
*rx_desc
, *next_rxd
;
908 struct e1000_buffer
*buffer_info
, *next_buffer
;
911 int cleaned_count
= 0;
912 bool cleaned
= false;
913 unsigned int total_rx_bytes
=0, total_rx_packets
=0;
915 i
= rx_ring
->next_to_clean
;
916 rx_desc
= E1000_RX_DESC(*rx_ring
, i
);
917 buffer_info
= &rx_ring
->buffer_info
[i
];
919 while (rx_desc
->status
& E1000_RXD_STAT_DD
) {
923 if (*work_done
>= work_to_do
)
927 status
= rx_desc
->status
;
928 skb
= buffer_info
->skb
;
929 buffer_info
->skb
= NULL
;
932 if (i
== rx_ring
->count
)
934 next_rxd
= E1000_RX_DESC(*rx_ring
, i
);
937 next_buffer
= &rx_ring
->buffer_info
[i
];
941 pci_unmap_page(pdev
, buffer_info
->dma
, PAGE_SIZE
,
943 buffer_info
->dma
= 0;
945 length
= le16_to_cpu(rx_desc
->length
);
947 /* errors is only valid for DD + EOP descriptors */
948 if (unlikely((status
& E1000_RXD_STAT_EOP
) &&
949 (rx_desc
->errors
& E1000_RXD_ERR_FRAME_ERR_MASK
))) {
950 /* recycle both page and skb */
951 buffer_info
->skb
= skb
;
952 /* an error means any chain goes out the window
954 if (rx_ring
->rx_skb_top
)
955 dev_kfree_skb(rx_ring
->rx_skb_top
);
956 rx_ring
->rx_skb_top
= NULL
;
960 #define rxtop rx_ring->rx_skb_top
961 if (!(status
& E1000_RXD_STAT_EOP
)) {
962 /* this descriptor is only the beginning (or middle) */
964 /* this is the beginning of a chain */
966 skb_fill_page_desc(rxtop
, 0, buffer_info
->page
,
969 /* this is the middle of a chain */
970 skb_fill_page_desc(rxtop
,
971 skb_shinfo(rxtop
)->nr_frags
,
972 buffer_info
->page
, 0, length
);
973 /* re-use the skb, only consumed the page */
974 buffer_info
->skb
= skb
;
976 e1000_consume_page(buffer_info
, rxtop
, length
);
980 /* end of the chain */
981 skb_fill_page_desc(rxtop
,
982 skb_shinfo(rxtop
)->nr_frags
,
983 buffer_info
->page
, 0, length
);
984 /* re-use the current skb, we only consumed the
986 buffer_info
->skb
= skb
;
989 e1000_consume_page(buffer_info
, skb
, length
);
991 /* no chain, got EOP, this buf is the packet
992 * copybreak to save the put_page/alloc_page */
993 if (length
<= copybreak
&&
994 skb_tailroom(skb
) >= length
) {
996 vaddr
= kmap_atomic(buffer_info
->page
,
997 KM_SKB_DATA_SOFTIRQ
);
998 memcpy(skb_tail_pointer(skb
), vaddr
,
1000 kunmap_atomic(vaddr
,
1001 KM_SKB_DATA_SOFTIRQ
);
1002 /* re-use the page, so don't erase
1003 * buffer_info->page */
1004 skb_put(skb
, length
);
1006 skb_fill_page_desc(skb
, 0,
1007 buffer_info
->page
, 0,
1009 e1000_consume_page(buffer_info
, skb
,
1015 /* Receive Checksum Offload XXX recompute due to CRC strip? */
1016 e1000_rx_checksum(adapter
,
1018 ((u32
)(rx_desc
->errors
) << 24),
1019 le16_to_cpu(rx_desc
->csum
), skb
);
1021 /* probably a little skewed due to removing CRC */
1022 total_rx_bytes
+= skb
->len
;
1025 /* eth type trans needs skb->data to point to something */
1026 if (!pskb_may_pull(skb
, ETH_HLEN
)) {
1027 e_err("pskb_may_pull failed.\n");
1032 e1000_receive_skb(adapter
, netdev
, skb
, status
,
1036 rx_desc
->status
= 0;
1038 /* return some buffers to hardware, one at a time is too slow */
1039 if (unlikely(cleaned_count
>= E1000_RX_BUFFER_WRITE
)) {
1040 adapter
->alloc_rx_buf(adapter
, cleaned_count
);
1044 /* use prefetched values */
1046 buffer_info
= next_buffer
;
1048 rx_ring
->next_to_clean
= i
;
1050 cleaned_count
= e1000_desc_unused(rx_ring
);
1052 adapter
->alloc_rx_buf(adapter
, cleaned_count
);
1054 adapter
->total_rx_bytes
+= total_rx_bytes
;
1055 adapter
->total_rx_packets
+= total_rx_packets
;
1056 adapter
->net_stats
.rx_bytes
+= total_rx_bytes
;
1057 adapter
->net_stats
.rx_packets
+= total_rx_packets
;
1062 * e1000_clean_rx_ring - Free Rx Buffers per Queue
1063 * @adapter: board private structure
1065 static void e1000_clean_rx_ring(struct e1000_adapter
*adapter
)
1067 struct e1000_ring
*rx_ring
= adapter
->rx_ring
;
1068 struct e1000_buffer
*buffer_info
;
1069 struct e1000_ps_page
*ps_page
;
1070 struct pci_dev
*pdev
= adapter
->pdev
;
1073 /* Free all the Rx ring sk_buffs */
1074 for (i
= 0; i
< rx_ring
->count
; i
++) {
1075 buffer_info
= &rx_ring
->buffer_info
[i
];
1076 if (buffer_info
->dma
) {
1077 if (adapter
->clean_rx
== e1000_clean_rx_irq
)
1078 pci_unmap_single(pdev
, buffer_info
->dma
,
1079 adapter
->rx_buffer_len
,
1080 PCI_DMA_FROMDEVICE
);
1081 else if (adapter
->clean_rx
== e1000_clean_jumbo_rx_irq
)
1082 pci_unmap_page(pdev
, buffer_info
->dma
,
1084 PCI_DMA_FROMDEVICE
);
1085 else if (adapter
->clean_rx
== e1000_clean_rx_irq_ps
)
1086 pci_unmap_single(pdev
, buffer_info
->dma
,
1087 adapter
->rx_ps_bsize0
,
1088 PCI_DMA_FROMDEVICE
);
1089 buffer_info
->dma
= 0;
1092 if (buffer_info
->page
) {
1093 put_page(buffer_info
->page
);
1094 buffer_info
->page
= NULL
;
1097 if (buffer_info
->skb
) {
1098 dev_kfree_skb(buffer_info
->skb
);
1099 buffer_info
->skb
= NULL
;
1102 for (j
= 0; j
< PS_PAGE_BUFFERS
; j
++) {
1103 ps_page
= &buffer_info
->ps_pages
[j
];
1106 pci_unmap_page(pdev
, ps_page
->dma
, PAGE_SIZE
,
1107 PCI_DMA_FROMDEVICE
);
1109 put_page(ps_page
->page
);
1110 ps_page
->page
= NULL
;
1114 /* there also may be some cached data from a chained receive */
1115 if (rx_ring
->rx_skb_top
) {
1116 dev_kfree_skb(rx_ring
->rx_skb_top
);
1117 rx_ring
->rx_skb_top
= NULL
;
1120 /* Zero out the descriptor ring */
1121 memset(rx_ring
->desc
, 0, rx_ring
->size
);
1123 rx_ring
->next_to_clean
= 0;
1124 rx_ring
->next_to_use
= 0;
1126 writel(0, adapter
->hw
.hw_addr
+ rx_ring
->head
);
1127 writel(0, adapter
->hw
.hw_addr
+ rx_ring
->tail
);
static void e1000e_downshift_workaround(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
					struct e1000_adapter, downshift_task);

	e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
}
1139 * e1000_intr_msi - Interrupt Handler
1140 * @irq: interrupt number
1141 * @data: pointer to a network interface device structure
1143 static irqreturn_t
e1000_intr_msi(int irq
, void *data
)
1145 struct net_device
*netdev
= data
;
1146 struct e1000_adapter
*adapter
= netdev_priv(netdev
);
1147 struct e1000_hw
*hw
= &adapter
->hw
;
1148 u32 icr
= er32(ICR
);
1151 * read ICR disables interrupts using IAM
1154 if (icr
& E1000_ICR_LSC
) {
1155 hw
->mac
.get_link_status
= 1;
1157 * ICH8 workaround-- Call gig speed drop workaround on cable
1158 * disconnect (LSC) before accessing any PHY registers
1160 if ((adapter
->flags
& FLAG_LSC_GIG_SPEED_DROP
) &&
1161 (!(er32(STATUS
) & E1000_STATUS_LU
)))
1162 schedule_work(&adapter
->downshift_task
);
1165 * 80003ES2LAN workaround-- For packet buffer work-around on
1166 * link down event; disable receives here in the ISR and reset
1167 * adapter in watchdog
1169 if (netif_carrier_ok(netdev
) &&
1170 adapter
->flags
& FLAG_RX_NEEDS_RESTART
) {
1171 /* disable receives */
1172 u32 rctl
= er32(RCTL
);
1173 ew32(RCTL
, rctl
& ~E1000_RCTL_EN
);
1174 adapter
->flags
|= FLAG_RX_RESTART_NOW
;
1176 /* guard against interrupt when we're going down */
1177 if (!test_bit(__E1000_DOWN
, &adapter
->state
))
1178 mod_timer(&adapter
->watchdog_timer
, jiffies
+ 1);
1181 if (napi_schedule_prep(&adapter
->napi
)) {
1182 adapter
->total_tx_bytes
= 0;
1183 adapter
->total_tx_packets
= 0;
1184 adapter
->total_rx_bytes
= 0;
1185 adapter
->total_rx_packets
= 0;
1186 __napi_schedule(&adapter
->napi
);
1193 * e1000_intr - Interrupt Handler
1194 * @irq: interrupt number
1195 * @data: pointer to a network interface device structure
1197 static irqreturn_t
e1000_intr(int irq
, void *data
)
1199 struct net_device
*netdev
= data
;
1200 struct e1000_adapter
*adapter
= netdev_priv(netdev
);
1201 struct e1000_hw
*hw
= &adapter
->hw
;
1202 u32 rctl
, icr
= er32(ICR
);
1205 return IRQ_NONE
; /* Not our interrupt */
1208 * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
1209 * not set, then the adapter didn't send an interrupt
1211 if (!(icr
& E1000_ICR_INT_ASSERTED
))
1215 * Interrupt Auto-Mask...upon reading ICR,
1216 * interrupts are masked. No need for the
1220 if (icr
& E1000_ICR_LSC
) {
1221 hw
->mac
.get_link_status
= 1;
1223 * ICH8 workaround-- Call gig speed drop workaround on cable
1224 * disconnect (LSC) before accessing any PHY registers
1226 if ((adapter
->flags
& FLAG_LSC_GIG_SPEED_DROP
) &&
1227 (!(er32(STATUS
) & E1000_STATUS_LU
)))
1228 schedule_work(&adapter
->downshift_task
);
1231 * 80003ES2LAN workaround--
1232 * For packet buffer work-around on link down event;
1233 * disable receives here in the ISR and
1234 * reset adapter in watchdog
1236 if (netif_carrier_ok(netdev
) &&
1237 (adapter
->flags
& FLAG_RX_NEEDS_RESTART
)) {
1238 /* disable receives */
1240 ew32(RCTL
, rctl
& ~E1000_RCTL_EN
);
1241 adapter
->flags
|= FLAG_RX_RESTART_NOW
;
1243 /* guard against interrupt when we're going down */
1244 if (!test_bit(__E1000_DOWN
, &adapter
->state
))
1245 mod_timer(&adapter
->watchdog_timer
, jiffies
+ 1);
1248 if (napi_schedule_prep(&adapter
->napi
)) {
1249 adapter
->total_tx_bytes
= 0;
1250 adapter
->total_tx_packets
= 0;
1251 adapter
->total_rx_bytes
= 0;
1252 adapter
->total_rx_packets
= 0;
1253 __napi_schedule(&adapter
->napi
);
1259 static irqreturn_t
e1000_msix_other(int irq
, void *data
)
1261 struct net_device
*netdev
= data
;
1262 struct e1000_adapter
*adapter
= netdev_priv(netdev
);
1263 struct e1000_hw
*hw
= &adapter
->hw
;
1264 u32 icr
= er32(ICR
);
1266 if (!(icr
& E1000_ICR_INT_ASSERTED
)) {
1267 ew32(IMS
, E1000_IMS_OTHER
);
1271 if (icr
& adapter
->eiac_mask
)
1272 ew32(ICS
, (icr
& adapter
->eiac_mask
));
1274 if (icr
& E1000_ICR_OTHER
) {
1275 if (!(icr
& E1000_ICR_LSC
))
1276 goto no_link_interrupt
;
1277 hw
->mac
.get_link_status
= 1;
1278 /* guard against interrupt when we're going down */
1279 if (!test_bit(__E1000_DOWN
, &adapter
->state
))
1280 mod_timer(&adapter
->watchdog_timer
, jiffies
+ 1);
1284 ew32(IMS
, E1000_IMS_LSC
| E1000_IMS_OTHER
);
1290 static irqreturn_t
e1000_intr_msix_tx(int irq
, void *data
)
1292 struct net_device
*netdev
= data
;
1293 struct e1000_adapter
*adapter
= netdev_priv(netdev
);
1294 struct e1000_hw
*hw
= &adapter
->hw
;
1295 struct e1000_ring
*tx_ring
= adapter
->tx_ring
;
1298 adapter
->total_tx_bytes
= 0;
1299 adapter
->total_tx_packets
= 0;
1301 if (!e1000_clean_tx_irq(adapter
))
1302 /* Ring was not completely cleaned, so fire another interrupt */
1303 ew32(ICS
, tx_ring
->ims_val
);
1308 static irqreturn_t
e1000_intr_msix_rx(int irq
, void *data
)
1310 struct net_device
*netdev
= data
;
1311 struct e1000_adapter
*adapter
= netdev_priv(netdev
);
1313 /* Write the ITR value calculated at the end of the
1314 * previous interrupt.
1316 if (adapter
->rx_ring
->set_itr
) {
1317 writel(1000000000 / (adapter
->rx_ring
->itr_val
* 256),
1318 adapter
->hw
.hw_addr
+ adapter
->rx_ring
->itr_register
);
1319 adapter
->rx_ring
->set_itr
= 0;
1322 if (napi_schedule_prep(&adapter
->napi
)) {
1323 adapter
->total_rx_bytes
= 0;
1324 adapter
->total_rx_packets
= 0;
1325 __napi_schedule(&adapter
->napi
);
1331 * e1000_configure_msix - Configure MSI-X hardware
1333 * e1000_configure_msix sets up the hardware to properly
1334 * generate MSI-X interrupts.
1336 static void e1000_configure_msix(struct e1000_adapter
*adapter
)
1338 struct e1000_hw
*hw
= &adapter
->hw
;
1339 struct e1000_ring
*rx_ring
= adapter
->rx_ring
;
1340 struct e1000_ring
*tx_ring
= adapter
->tx_ring
;
1342 u32 ctrl_ext
, ivar
= 0;
1344 adapter
->eiac_mask
= 0;
1346 /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
1347 if (hw
->mac
.type
== e1000_82574
) {
1348 u32 rfctl
= er32(RFCTL
);
1349 rfctl
|= E1000_RFCTL_ACK_DIS
;
1353 #define E1000_IVAR_INT_ALLOC_VALID 0x8
1354 /* Configure Rx vector */
1355 rx_ring
->ims_val
= E1000_IMS_RXQ0
;
1356 adapter
->eiac_mask
|= rx_ring
->ims_val
;
1357 if (rx_ring
->itr_val
)
1358 writel(1000000000 / (rx_ring
->itr_val
* 256),
1359 hw
->hw_addr
+ rx_ring
->itr_register
);
1361 writel(1, hw
->hw_addr
+ rx_ring
->itr_register
);
1362 ivar
= E1000_IVAR_INT_ALLOC_VALID
| vector
;
1364 /* Configure Tx vector */
1365 tx_ring
->ims_val
= E1000_IMS_TXQ0
;
1367 if (tx_ring
->itr_val
)
1368 writel(1000000000 / (tx_ring
->itr_val
* 256),
1369 hw
->hw_addr
+ tx_ring
->itr_register
);
1371 writel(1, hw
->hw_addr
+ tx_ring
->itr_register
);
1372 adapter
->eiac_mask
|= tx_ring
->ims_val
;
1373 ivar
|= ((E1000_IVAR_INT_ALLOC_VALID
| vector
) << 8);
1375 /* set vector for Other Causes, e.g. link changes */
1377 ivar
|= ((E1000_IVAR_INT_ALLOC_VALID
| vector
) << 16);
1378 if (rx_ring
->itr_val
)
1379 writel(1000000000 / (rx_ring
->itr_val
* 256),
1380 hw
->hw_addr
+ E1000_EITR_82574(vector
));
1382 writel(1, hw
->hw_addr
+ E1000_EITR_82574(vector
));
1384 /* Cause Tx interrupts on every write back */
1389 /* enable MSI-X PBA support */
1390 ctrl_ext
= er32(CTRL_EXT
);
1391 ctrl_ext
|= E1000_CTRL_EXT_PBA_CLR
;
1393 /* Auto-Mask Other interrupts upon ICR read */
1394 #define E1000_EIAC_MASK_82574 0x01F00000
1395 ew32(IAM
, ~E1000_EIAC_MASK_82574
| E1000_IMS_OTHER
);
1396 ctrl_ext
|= E1000_CTRL_EXT_EIAME
;
1397 ew32(CTRL_EXT
, ctrl_ext
);
1401 void e1000e_reset_interrupt_capability(struct e1000_adapter
*adapter
)
1403 if (adapter
->msix_entries
) {
1404 pci_disable_msix(adapter
->pdev
);
1405 kfree(adapter
->msix_entries
);
1406 adapter
->msix_entries
= NULL
;
1407 } else if (adapter
->flags
& FLAG_MSI_ENABLED
) {
1408 pci_disable_msi(adapter
->pdev
);
1409 adapter
->flags
&= ~FLAG_MSI_ENABLED
;
1416 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
1418 * Attempt to configure interrupts using the best available
1419 * capabilities of the hardware and kernel.
1421 void e1000e_set_interrupt_capability(struct e1000_adapter
*adapter
)
1427 switch (adapter
->int_mode
) {
1428 case E1000E_INT_MODE_MSIX
:
1429 if (adapter
->flags
& FLAG_HAS_MSIX
) {
1430 numvecs
= 3; /* RxQ0, TxQ0 and other */
1431 adapter
->msix_entries
= kcalloc(numvecs
,
1432 sizeof(struct msix_entry
),
1434 if (adapter
->msix_entries
) {
1435 for (i
= 0; i
< numvecs
; i
++)
1436 adapter
->msix_entries
[i
].entry
= i
;
1438 err
= pci_enable_msix(adapter
->pdev
,
1439 adapter
->msix_entries
,
1444 /* MSI-X failed, so fall through and try MSI */
1445 e_err("Failed to initialize MSI-X interrupts. "
1446 "Falling back to MSI interrupts.\n");
1447 e1000e_reset_interrupt_capability(adapter
);
1449 adapter
->int_mode
= E1000E_INT_MODE_MSI
;
1451 case E1000E_INT_MODE_MSI
:
1452 if (!pci_enable_msi(adapter
->pdev
)) {
1453 adapter
->flags
|= FLAG_MSI_ENABLED
;
1455 adapter
->int_mode
= E1000E_INT_MODE_LEGACY
;
1456 e_err("Failed to initialize MSI interrupts. Falling "
1457 "back to legacy interrupts.\n");
1460 case E1000E_INT_MODE_LEGACY
:
1461 /* Don't do anything; this is the system default */
1469 * e1000_request_msix - Initialize MSI-X interrupts
1471 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
1474 static int e1000_request_msix(struct e1000_adapter
*adapter
)
1476 struct net_device
*netdev
= adapter
->netdev
;
1477 int err
= 0, vector
= 0;
1479 if (strlen(netdev
->name
) < (IFNAMSIZ
- 5))
1480 sprintf(adapter
->rx_ring
->name
, "%s-rx-0", netdev
->name
);
1482 memcpy(adapter
->rx_ring
->name
, netdev
->name
, IFNAMSIZ
);
1483 err
= request_irq(adapter
->msix_entries
[vector
].vector
,
1484 &e1000_intr_msix_rx
, 0, adapter
->rx_ring
->name
,
1488 adapter
->rx_ring
->itr_register
= E1000_EITR_82574(vector
);
1489 adapter
->rx_ring
->itr_val
= adapter
->itr
;
1492 if (strlen(netdev
->name
) < (IFNAMSIZ
- 5))
1493 sprintf(adapter
->tx_ring
->name
, "%s-tx-0", netdev
->name
);
1495 memcpy(adapter
->tx_ring
->name
, netdev
->name
, IFNAMSIZ
);
1496 err
= request_irq(adapter
->msix_entries
[vector
].vector
,
1497 &e1000_intr_msix_tx
, 0, adapter
->tx_ring
->name
,
1501 adapter
->tx_ring
->itr_register
= E1000_EITR_82574(vector
);
1502 adapter
->tx_ring
->itr_val
= adapter
->itr
;
1505 err
= request_irq(adapter
->msix_entries
[vector
].vector
,
1506 &e1000_msix_other
, 0, netdev
->name
, netdev
);
1510 e1000_configure_msix(adapter
);
1517 * e1000_request_irq - initialize interrupts
1519 * Attempts to configure interrupts using the best available
1520 * capabilities of the hardware and kernel.
1522 static int e1000_request_irq(struct e1000_adapter
*adapter
)
1524 struct net_device
*netdev
= adapter
->netdev
;
1527 if (adapter
->msix_entries
) {
1528 err
= e1000_request_msix(adapter
);
1531 /* fall back to MSI */
1532 e1000e_reset_interrupt_capability(adapter
);
1533 adapter
->int_mode
= E1000E_INT_MODE_MSI
;
1534 e1000e_set_interrupt_capability(adapter
);
1536 if (adapter
->flags
& FLAG_MSI_ENABLED
) {
1537 err
= request_irq(adapter
->pdev
->irq
, &e1000_intr_msi
, 0,
1538 netdev
->name
, netdev
);
1542 /* fall back to legacy interrupt */
1543 e1000e_reset_interrupt_capability(adapter
);
1544 adapter
->int_mode
= E1000E_INT_MODE_LEGACY
;
1547 err
= request_irq(adapter
->pdev
->irq
, &e1000_intr
, IRQF_SHARED
,
1548 netdev
->name
, netdev
);
1550 e_err("Unable to allocate interrupt, Error: %d\n", err
);
1555 static void e1000_free_irq(struct e1000_adapter
*adapter
)
1557 struct net_device
*netdev
= adapter
->netdev
;
1559 if (adapter
->msix_entries
) {
1562 free_irq(adapter
->msix_entries
[vector
].vector
, netdev
);
1565 free_irq(adapter
->msix_entries
[vector
].vector
, netdev
);
1568 /* Other Causes interrupt vector */
1569 free_irq(adapter
->msix_entries
[vector
].vector
, netdev
);
1573 free_irq(adapter
->pdev
->irq
, netdev
);
1577 * e1000_irq_disable - Mask off interrupt generation on the NIC
1579 static void e1000_irq_disable(struct e1000_adapter
*adapter
)
1581 struct e1000_hw
*hw
= &adapter
->hw
;
1584 if (adapter
->msix_entries
)
1585 ew32(EIAC_82574
, 0);
1587 synchronize_irq(adapter
->pdev
->irq
);
1591 * e1000_irq_enable - Enable default interrupt generation settings
1593 static void e1000_irq_enable(struct e1000_adapter
*adapter
)
1595 struct e1000_hw
*hw
= &adapter
->hw
;
1597 if (adapter
->msix_entries
) {
1598 ew32(EIAC_82574
, adapter
->eiac_mask
& E1000_EIAC_MASK_82574
);
1599 ew32(IMS
, adapter
->eiac_mask
| E1000_IMS_OTHER
| E1000_IMS_LSC
);
1601 ew32(IMS
, IMS_ENABLE_MASK
);
1607 * e1000_get_hw_control - get control of the h/w from f/w
1608 * @adapter: address of board private structure
1610 * e1000_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
1611 * For ASF and Pass Through versions of f/w this means that
1612 * the driver is loaded. For AMT version (only with 82573)
1613 * of the f/w this means that the network i/f is open.
1615 static void e1000_get_hw_control(struct e1000_adapter
*adapter
)
1617 struct e1000_hw
*hw
= &adapter
->hw
;
1621 /* Let firmware know the driver has taken over */
1622 if (adapter
->flags
& FLAG_HAS_SWSM_ON_LOAD
) {
1624 ew32(SWSM
, swsm
| E1000_SWSM_DRV_LOAD
);
1625 } else if (adapter
->flags
& FLAG_HAS_CTRLEXT_ON_LOAD
) {
1626 ctrl_ext
= er32(CTRL_EXT
);
1627 ew32(CTRL_EXT
, ctrl_ext
| E1000_CTRL_EXT_DRV_LOAD
);
1632 * e1000_release_hw_control - release control of the h/w to f/w
1633 * @adapter: address of board private structure
1635 * e1000_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
1636 * For ASF and Pass Through versions of f/w this means that the
1637 * driver is no longer loaded. For AMT version (only with 82573) i
1638 * of the f/w this means that the network i/f is closed.
1641 static void e1000_release_hw_control(struct e1000_adapter
*adapter
)
1643 struct e1000_hw
*hw
= &adapter
->hw
;
1647 /* Let firmware taken over control of h/w */
1648 if (adapter
->flags
& FLAG_HAS_SWSM_ON_LOAD
) {
1650 ew32(SWSM
, swsm
& ~E1000_SWSM_DRV_LOAD
);
1651 } else if (adapter
->flags
& FLAG_HAS_CTRLEXT_ON_LOAD
) {
1652 ctrl_ext
= er32(CTRL_EXT
);
1653 ew32(CTRL_EXT
, ctrl_ext
& ~E1000_CTRL_EXT_DRV_LOAD
);
1658 * @e1000_alloc_ring - allocate memory for a ring structure
1660 static int e1000_alloc_ring_dma(struct e1000_adapter
*adapter
,
1661 struct e1000_ring
*ring
)
1663 struct pci_dev
*pdev
= adapter
->pdev
;
1665 ring
->desc
= dma_alloc_coherent(&pdev
->dev
, ring
->size
, &ring
->dma
,
1674 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
1675 * @adapter: board private structure
1677 * Return 0 on success, negative on failure
1679 int e1000e_setup_tx_resources(struct e1000_adapter
*adapter
)
1681 struct e1000_ring
*tx_ring
= adapter
->tx_ring
;
1682 int err
= -ENOMEM
, size
;
1684 size
= sizeof(struct e1000_buffer
) * tx_ring
->count
;
1685 tx_ring
->buffer_info
= vmalloc(size
);
1686 if (!tx_ring
->buffer_info
)
1688 memset(tx_ring
->buffer_info
, 0, size
);
1690 /* round up to nearest 4K */
1691 tx_ring
->size
= tx_ring
->count
* sizeof(struct e1000_tx_desc
);
1692 tx_ring
->size
= ALIGN(tx_ring
->size
, 4096);
1694 err
= e1000_alloc_ring_dma(adapter
, tx_ring
);
1698 tx_ring
->next_to_use
= 0;
1699 tx_ring
->next_to_clean
= 0;
1703 vfree(tx_ring
->buffer_info
);
1704 e_err("Unable to allocate memory for the transmit descriptor ring\n");
1709 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
1710 * @adapter: board private structure
1712 * Returns 0 on success, negative on failure
1714 int e1000e_setup_rx_resources(struct e1000_adapter
*adapter
)
1716 struct e1000_ring
*rx_ring
= adapter
->rx_ring
;
1717 struct e1000_buffer
*buffer_info
;
1718 int i
, size
, desc_len
, err
= -ENOMEM
;
1720 size
= sizeof(struct e1000_buffer
) * rx_ring
->count
;
1721 rx_ring
->buffer_info
= vmalloc(size
);
1722 if (!rx_ring
->buffer_info
)
1724 memset(rx_ring
->buffer_info
, 0, size
);
1726 for (i
= 0; i
< rx_ring
->count
; i
++) {
1727 buffer_info
= &rx_ring
->buffer_info
[i
];
1728 buffer_info
->ps_pages
= kcalloc(PS_PAGE_BUFFERS
,
1729 sizeof(struct e1000_ps_page
),
1731 if (!buffer_info
->ps_pages
)
1735 desc_len
= sizeof(union e1000_rx_desc_packet_split
);
1737 /* Round up to nearest 4K */
1738 rx_ring
->size
= rx_ring
->count
* desc_len
;
1739 rx_ring
->size
= ALIGN(rx_ring
->size
, 4096);
1741 err
= e1000_alloc_ring_dma(adapter
, rx_ring
);
1745 rx_ring
->next_to_clean
= 0;
1746 rx_ring
->next_to_use
= 0;
1747 rx_ring
->rx_skb_top
= NULL
;
1752 for (i
= 0; i
< rx_ring
->count
; i
++) {
1753 buffer_info
= &rx_ring
->buffer_info
[i
];
1754 kfree(buffer_info
->ps_pages
);
1757 vfree(rx_ring
->buffer_info
);
1758 e_err("Unable to allocate memory for the transmit descriptor ring\n");
1763 * e1000_clean_tx_ring - Free Tx Buffers
1764 * @adapter: board private structure
1766 static void e1000_clean_tx_ring(struct e1000_adapter
*adapter
)
1768 struct e1000_ring
*tx_ring
= adapter
->tx_ring
;
1769 struct e1000_buffer
*buffer_info
;
1773 for (i
= 0; i
< tx_ring
->count
; i
++) {
1774 buffer_info
= &tx_ring
->buffer_info
[i
];
1775 e1000_put_txbuf(adapter
, buffer_info
);
1778 size
= sizeof(struct e1000_buffer
) * tx_ring
->count
;
1779 memset(tx_ring
->buffer_info
, 0, size
);
1781 memset(tx_ring
->desc
, 0, tx_ring
->size
);
1783 tx_ring
->next_to_use
= 0;
1784 tx_ring
->next_to_clean
= 0;
1786 writel(0, adapter
->hw
.hw_addr
+ tx_ring
->head
);
1787 writel(0, adapter
->hw
.hw_addr
+ tx_ring
->tail
);
1791 * e1000e_free_tx_resources - Free Tx Resources per Queue
1792 * @adapter: board private structure
1794 * Free all transmit software resources
1796 void e1000e_free_tx_resources(struct e1000_adapter
*adapter
)
1798 struct pci_dev
*pdev
= adapter
->pdev
;
1799 struct e1000_ring
*tx_ring
= adapter
->tx_ring
;
1801 e1000_clean_tx_ring(adapter
);
1803 vfree(tx_ring
->buffer_info
);
1804 tx_ring
->buffer_info
= NULL
;
1806 dma_free_coherent(&pdev
->dev
, tx_ring
->size
, tx_ring
->desc
,
1808 tx_ring
->desc
= NULL
;
1812 * e1000e_free_rx_resources - Free Rx Resources
1813 * @adapter: board private structure
1815 * Free all receive software resources
1818 void e1000e_free_rx_resources(struct e1000_adapter
*adapter
)
1820 struct pci_dev
*pdev
= adapter
->pdev
;
1821 struct e1000_ring
*rx_ring
= adapter
->rx_ring
;
1824 e1000_clean_rx_ring(adapter
);
1826 for (i
= 0; i
< rx_ring
->count
; i
++) {
1827 kfree(rx_ring
->buffer_info
[i
].ps_pages
);
1830 vfree(rx_ring
->buffer_info
);
1831 rx_ring
->buffer_info
= NULL
;
1833 dma_free_coherent(&pdev
->dev
, rx_ring
->size
, rx_ring
->desc
,
1835 rx_ring
->desc
= NULL
;
1839 * e1000_update_itr - update the dynamic ITR value based on statistics
1840 * @adapter: pointer to adapter
1841 * @itr_setting: current adapter->itr
1842 * @packets: the number of packets during this measurement interval
1843 * @bytes: the number of bytes during this measurement interval
1845 * Stores a new ITR value based on packets and byte
1846 * counts during the last interrupt. The advantage of per interrupt
1847 * computation is faster updates and more accurate ITR for the current
1848 * traffic pattern. Constants in this function were computed
1849 * based on theoretical maximum wire speed and thresholds were set based
1850 * on testing data as well as attempting to minimize response time
1851 * while increasing bulk throughput. This functionality is controlled
1852 * by the InterruptThrottleRate module parameter.
1854 static unsigned int e1000_update_itr(struct e1000_adapter
*adapter
,
1855 u16 itr_setting
, int packets
,
1858 unsigned int retval
= itr_setting
;
1861 goto update_itr_done
;
1863 switch (itr_setting
) {
1864 case lowest_latency
:
1865 /* handle TSO and jumbo frames */
1866 if (bytes
/packets
> 8000)
1867 retval
= bulk_latency
;
1868 else if ((packets
< 5) && (bytes
> 512)) {
1869 retval
= low_latency
;
1872 case low_latency
: /* 50 usec aka 20000 ints/s */
1873 if (bytes
> 10000) {
1874 /* this if handles the TSO accounting */
1875 if (bytes
/packets
> 8000) {
1876 retval
= bulk_latency
;
1877 } else if ((packets
< 10) || ((bytes
/packets
) > 1200)) {
1878 retval
= bulk_latency
;
1879 } else if ((packets
> 35)) {
1880 retval
= lowest_latency
;
1882 } else if (bytes
/packets
> 2000) {
1883 retval
= bulk_latency
;
1884 } else if (packets
<= 2 && bytes
< 512) {
1885 retval
= lowest_latency
;
1888 case bulk_latency
: /* 250 usec aka 4000 ints/s */
1889 if (bytes
> 25000) {
1891 retval
= low_latency
;
1893 } else if (bytes
< 6000) {
1894 retval
= low_latency
;
1903 static void e1000_set_itr(struct e1000_adapter
*adapter
)
1905 struct e1000_hw
*hw
= &adapter
->hw
;
1907 u32 new_itr
= adapter
->itr
;
1909 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
1910 if (adapter
->link_speed
!= SPEED_1000
) {
1916 adapter
->tx_itr
= e1000_update_itr(adapter
,
1918 adapter
->total_tx_packets
,
1919 adapter
->total_tx_bytes
);
1920 /* conservative mode (itr 3) eliminates the lowest_latency setting */
1921 if (adapter
->itr_setting
== 3 && adapter
->tx_itr
== lowest_latency
)
1922 adapter
->tx_itr
= low_latency
;
1924 adapter
->rx_itr
= e1000_update_itr(adapter
,
1926 adapter
->total_rx_packets
,
1927 adapter
->total_rx_bytes
);
1928 /* conservative mode (itr 3) eliminates the lowest_latency setting */
1929 if (adapter
->itr_setting
== 3 && adapter
->rx_itr
== lowest_latency
)
1930 adapter
->rx_itr
= low_latency
;
1932 current_itr
= max(adapter
->rx_itr
, adapter
->tx_itr
);
1934 switch (current_itr
) {
1935 /* counts and packets in update_itr are dependent on these numbers */
1936 case lowest_latency
:
1940 new_itr
= 20000; /* aka hwitr = ~200 */
1950 if (new_itr
!= adapter
->itr
) {
1952 * this attempts to bias the interrupt rate towards Bulk
1953 * by adding intermediate steps when interrupt rate is
1956 new_itr
= new_itr
> adapter
->itr
?
1957 min(adapter
->itr
+ (new_itr
>> 2), new_itr
) :
1959 adapter
->itr
= new_itr
;
1960 adapter
->rx_ring
->itr_val
= new_itr
;
1961 if (adapter
->msix_entries
)
1962 adapter
->rx_ring
->set_itr
= 1;
1964 ew32(ITR
, 1000000000 / (new_itr
* 256));
1969 * e1000_alloc_queues - Allocate memory for all rings
1970 * @adapter: board private structure to initialize
1972 static int __devinit
e1000_alloc_queues(struct e1000_adapter
*adapter
)
1974 adapter
->tx_ring
= kzalloc(sizeof(struct e1000_ring
), GFP_KERNEL
);
1975 if (!adapter
->tx_ring
)
1978 adapter
->rx_ring
= kzalloc(sizeof(struct e1000_ring
), GFP_KERNEL
);
1979 if (!adapter
->rx_ring
)
1984 e_err("Unable to allocate memory for queues\n");
1985 kfree(adapter
->rx_ring
);
1986 kfree(adapter
->tx_ring
);
1991 * e1000_clean - NAPI Rx polling callback
1992 * @napi: struct associated with this polling callback
1993 * @budget: amount of packets driver is allowed to process this poll
1995 static int e1000_clean(struct napi_struct
*napi
, int budget
)
1997 struct e1000_adapter
*adapter
= container_of(napi
, struct e1000_adapter
, napi
);
1998 struct e1000_hw
*hw
= &adapter
->hw
;
1999 struct net_device
*poll_dev
= adapter
->netdev
;
2000 int tx_cleaned
= 0, work_done
= 0;
2002 adapter
= netdev_priv(poll_dev
);
2004 if (adapter
->msix_entries
&&
2005 !(adapter
->rx_ring
->ims_val
& adapter
->tx_ring
->ims_val
))
2008 tx_cleaned
= e1000_clean_tx_irq(adapter
);
2011 adapter
->clean_rx(adapter
, &work_done
, budget
);
2016 /* If budget not fully consumed, exit the polling mode */
2017 if (work_done
< budget
) {
2018 if (adapter
->itr_setting
& 3)
2019 e1000_set_itr(adapter
);
2020 napi_complete(napi
);
2021 if (adapter
->msix_entries
)
2022 ew32(IMS
, adapter
->rx_ring
->ims_val
);
2024 e1000_irq_enable(adapter
);
2030 static void e1000_vlan_rx_add_vid(struct net_device
*netdev
, u16 vid
)
2032 struct e1000_adapter
*adapter
= netdev_priv(netdev
);
2033 struct e1000_hw
*hw
= &adapter
->hw
;
2036 /* don't update vlan cookie if already programmed */
2037 if ((adapter
->hw
.mng_cookie
.status
&
2038 E1000_MNG_DHCP_COOKIE_STATUS_VLAN
) &&
2039 (vid
== adapter
->mng_vlan_id
))
2041 /* add VID to filter table */
2042 index
= (vid
>> 5) & 0x7F;
2043 vfta
= E1000_READ_REG_ARRAY(hw
, E1000_VFTA
, index
);
2044 vfta
|= (1 << (vid
& 0x1F));
2045 e1000e_write_vfta(hw
, index
, vfta
);
2048 static void e1000_vlan_rx_kill_vid(struct net_device
*netdev
, u16 vid
)
2050 struct e1000_adapter
*adapter
= netdev_priv(netdev
);
2051 struct e1000_hw
*hw
= &adapter
->hw
;
2054 if (!test_bit(__E1000_DOWN
, &adapter
->state
))
2055 e1000_irq_disable(adapter
);
2056 vlan_group_set_device(adapter
->vlgrp
, vid
, NULL
);
2058 if (!test_bit(__E1000_DOWN
, &adapter
->state
))
2059 e1000_irq_enable(adapter
);
2061 if ((adapter
->hw
.mng_cookie
.status
&
2062 E1000_MNG_DHCP_COOKIE_STATUS_VLAN
) &&
2063 (vid
== adapter
->mng_vlan_id
)) {
2064 /* release control to f/w */
2065 e1000_release_hw_control(adapter
);
2069 /* remove VID from filter table */
2070 index
= (vid
>> 5) & 0x7F;
2071 vfta
= E1000_READ_REG_ARRAY(hw
, E1000_VFTA
, index
);
2072 vfta
&= ~(1 << (vid
& 0x1F));
2073 e1000e_write_vfta(hw
, index
, vfta
);
2076 static void e1000_update_mng_vlan(struct e1000_adapter
*adapter
)
2078 struct net_device
*netdev
= adapter
->netdev
;
2079 u16 vid
= adapter
->hw
.mng_cookie
.vlan_id
;
2080 u16 old_vid
= adapter
->mng_vlan_id
;
2082 if (!adapter
->vlgrp
)
2085 if (!vlan_group_get_device(adapter
->vlgrp
, vid
)) {
2086 adapter
->mng_vlan_id
= E1000_MNG_VLAN_NONE
;
2087 if (adapter
->hw
.mng_cookie
.status
&
2088 E1000_MNG_DHCP_COOKIE_STATUS_VLAN
) {
2089 e1000_vlan_rx_add_vid(netdev
, vid
);
2090 adapter
->mng_vlan_id
= vid
;
2093 if ((old_vid
!= (u16
)E1000_MNG_VLAN_NONE
) &&
2095 !vlan_group_get_device(adapter
->vlgrp
, old_vid
))
2096 e1000_vlan_rx_kill_vid(netdev
, old_vid
);
2098 adapter
->mng_vlan_id
= vid
;
2103 static void e1000_vlan_rx_register(struct net_device
*netdev
,
2104 struct vlan_group
*grp
)
2106 struct e1000_adapter
*adapter
= netdev_priv(netdev
);
2107 struct e1000_hw
*hw
= &adapter
->hw
;
2110 if (!test_bit(__E1000_DOWN
, &adapter
->state
))
2111 e1000_irq_disable(adapter
);
2112 adapter
->vlgrp
= grp
;
2115 /* enable VLAN tag insert/strip */
2117 ctrl
|= E1000_CTRL_VME
;
2120 if (adapter
->flags
& FLAG_HAS_HW_VLAN_FILTER
) {
2121 /* enable VLAN receive filtering */
2123 rctl
&= ~E1000_RCTL_CFIEN
;
2125 e1000_update_mng_vlan(adapter
);
2128 /* disable VLAN tag insert/strip */
2130 ctrl
&= ~E1000_CTRL_VME
;
2133 if (adapter
->flags
& FLAG_HAS_HW_VLAN_FILTER
) {
2134 if (adapter
->mng_vlan_id
!=
2135 (u16
)E1000_MNG_VLAN_NONE
) {
2136 e1000_vlan_rx_kill_vid(netdev
,
2137 adapter
->mng_vlan_id
);
2138 adapter
->mng_vlan_id
= E1000_MNG_VLAN_NONE
;
2143 if (!test_bit(__E1000_DOWN
, &adapter
->state
))
2144 e1000_irq_enable(adapter
);
2147 static void e1000_restore_vlan(struct e1000_adapter
*adapter
)
2151 e1000_vlan_rx_register(adapter
->netdev
, adapter
->vlgrp
);
2153 if (!adapter
->vlgrp
)
2156 for (vid
= 0; vid
< VLAN_GROUP_ARRAY_LEN
; vid
++) {
2157 if (!vlan_group_get_device(adapter
->vlgrp
, vid
))
2159 e1000_vlan_rx_add_vid(adapter
->netdev
, vid
);
2163 static void e1000_init_manageability(struct e1000_adapter
*adapter
)
2165 struct e1000_hw
*hw
= &adapter
->hw
;
2168 if (!(adapter
->flags
& FLAG_MNG_PT_ENABLED
))
2174 * enable receiving management packets to the host. this will probably
2175 * generate destination unreachable messages from the host OS, but
2176 * the packets will be handled on SMBUS
2178 manc
|= E1000_MANC_EN_MNG2HOST
;
2179 manc2h
= er32(MANC2H
);
2180 #define E1000_MNG2HOST_PORT_623 (1 << 5)
2181 #define E1000_MNG2HOST_PORT_664 (1 << 6)
2182 manc2h
|= E1000_MNG2HOST_PORT_623
;
2183 manc2h
|= E1000_MNG2HOST_PORT_664
;
2184 ew32(MANC2H
, manc2h
);
2189 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
2190 * @adapter: board private structure
2192 * Configure the Tx unit of the MAC after a reset.
2194 static void e1000_configure_tx(struct e1000_adapter
*adapter
)
2196 struct e1000_hw
*hw
= &adapter
->hw
;
2197 struct e1000_ring
*tx_ring
= adapter
->tx_ring
;
2199 u32 tdlen
, tctl
, tipg
, tarc
;
2202 /* Setup the HW Tx Head and Tail descriptor pointers */
2203 tdba
= tx_ring
->dma
;
2204 tdlen
= tx_ring
->count
* sizeof(struct e1000_tx_desc
);
2205 ew32(TDBAL
, (tdba
& DMA_32BIT_MASK
));
2206 ew32(TDBAH
, (tdba
>> 32));
2210 tx_ring
->head
= E1000_TDH
;
2211 tx_ring
->tail
= E1000_TDT
;
2213 /* Set the default values for the Tx Inter Packet Gap timer */
2214 tipg
= DEFAULT_82543_TIPG_IPGT_COPPER
; /* 8 */
2215 ipgr1
= DEFAULT_82543_TIPG_IPGR1
; /* 8 */
2216 ipgr2
= DEFAULT_82543_TIPG_IPGR2
; /* 6 */
2218 if (adapter
->flags
& FLAG_TIPG_MEDIUM_FOR_80003ESLAN
)
2219 ipgr2
= DEFAULT_80003ES2LAN_TIPG_IPGR2
; /* 7 */
2221 tipg
|= ipgr1
<< E1000_TIPG_IPGR1_SHIFT
;
2222 tipg
|= ipgr2
<< E1000_TIPG_IPGR2_SHIFT
;
2225 /* Set the Tx Interrupt Delay register */
2226 ew32(TIDV
, adapter
->tx_int_delay
);
2227 /* Tx irq moderation */
2228 ew32(TADV
, adapter
->tx_abs_int_delay
);
2230 /* Program the Transmit Control Register */
2232 tctl
&= ~E1000_TCTL_CT
;
2233 tctl
|= E1000_TCTL_PSP
| E1000_TCTL_RTLC
|
2234 (E1000_COLLISION_THRESHOLD
<< E1000_CT_SHIFT
);
2236 if (adapter
->flags
& FLAG_TARC_SPEED_MODE_BIT
) {
2237 tarc
= er32(TARC(0));
2239 * set the speed mode bit, we'll clear it if we're not at
2240 * gigabit link later
2242 #define SPEED_MODE_BIT (1 << 21)
2243 tarc
|= SPEED_MODE_BIT
;
2244 ew32(TARC(0), tarc
);
2247 /* errata: program both queues to unweighted RR */
2248 if (adapter
->flags
& FLAG_TARC_SET_BIT_ZERO
) {
2249 tarc
= er32(TARC(0));
2251 ew32(TARC(0), tarc
);
2252 tarc
= er32(TARC(1));
2254 ew32(TARC(1), tarc
);
2257 e1000e_config_collision_dist(hw
);
2259 /* Setup Transmit Descriptor Settings for eop descriptor */
2260 adapter
->txd_cmd
= E1000_TXD_CMD_EOP
| E1000_TXD_CMD_IFCS
;
2262 /* only set IDE if we are delaying interrupts using the timers */
2263 if (adapter
->tx_int_delay
)
2264 adapter
->txd_cmd
|= E1000_TXD_CMD_IDE
;
2266 /* enable Report Status bit */
2267 adapter
->txd_cmd
|= E1000_TXD_CMD_RS
;
2271 adapter
->tx_queue_len
= adapter
->netdev
->tx_queue_len
;
2275 * e1000_setup_rctl - configure the receive control registers
2276 * @adapter: Board private structure
2278 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
2279 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
2280 static void e1000_setup_rctl(struct e1000_adapter
*adapter
)
2282 struct e1000_hw
*hw
= &adapter
->hw
;
2287 /* Program MC offset vector base */
2289 rctl
&= ~(3 << E1000_RCTL_MO_SHIFT
);
2290 rctl
|= E1000_RCTL_EN
| E1000_RCTL_BAM
|
2291 E1000_RCTL_LBM_NO
| E1000_RCTL_RDMTS_HALF
|
2292 (adapter
->hw
.mac
.mc_filter_type
<< E1000_RCTL_MO_SHIFT
);
2294 /* Do not Store bad packets */
2295 rctl
&= ~E1000_RCTL_SBP
;
2297 /* Enable Long Packet receive */
2298 if (adapter
->netdev
->mtu
<= ETH_DATA_LEN
)
2299 rctl
&= ~E1000_RCTL_LPE
;
2301 rctl
|= E1000_RCTL_LPE
;
2303 /* Some systems expect that the CRC is included in SMBUS traffic. The
2304 * hardware strips the CRC before sending to both SMBUS (BMC) and to
2305 * host memory when this is enabled
2307 if (adapter
->flags2
& FLAG2_CRC_STRIPPING
)
2308 rctl
|= E1000_RCTL_SECRC
;
2310 /* Setup buffer sizes */
2311 rctl
&= ~E1000_RCTL_SZ_4096
;
2312 rctl
|= E1000_RCTL_BSEX
;
2313 switch (adapter
->rx_buffer_len
) {
2315 rctl
|= E1000_RCTL_SZ_256
;
2316 rctl
&= ~E1000_RCTL_BSEX
;
2319 rctl
|= E1000_RCTL_SZ_512
;
2320 rctl
&= ~E1000_RCTL_BSEX
;
2323 rctl
|= E1000_RCTL_SZ_1024
;
2324 rctl
&= ~E1000_RCTL_BSEX
;
2328 rctl
|= E1000_RCTL_SZ_2048
;
2329 rctl
&= ~E1000_RCTL_BSEX
;
2332 rctl
|= E1000_RCTL_SZ_4096
;
2335 rctl
|= E1000_RCTL_SZ_8192
;
2338 rctl
|= E1000_RCTL_SZ_16384
;
2343 * 82571 and greater support packet-split where the protocol
2344 * header is placed in skb->data and the packet data is
2345 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
2346 * In the case of a non-split, skb->data is linearly filled,
2347 * followed by the page buffers. Therefore, skb->data is
2348 * sized to hold the largest protocol header.
2350 * allocations using alloc_page take too long for regular MTU
2351 * so only enable packet split for jumbo frames
2353 * Using pages when the page size is greater than 16k wastes
2354 * a lot of memory, since we allocate 3 pages at all times
2357 pages
= PAGE_USE_COUNT(adapter
->netdev
->mtu
);
2358 if (!(adapter
->flags
& FLAG_IS_ICH
) && (pages
<= 3) &&
2359 (PAGE_SIZE
<= 16384) && (rctl
& E1000_RCTL_LPE
))
2360 adapter
->rx_ps_pages
= pages
;
2362 adapter
->rx_ps_pages
= 0;
2364 if (adapter
->rx_ps_pages
) {
2365 /* Configure extra packet-split registers */
2366 rfctl
= er32(RFCTL
);
2367 rfctl
|= E1000_RFCTL_EXTEN
;
2369 * disable packet split support for IPv6 extension headers,
2370 * because some malformed IPv6 headers can hang the Rx
2372 rfctl
|= (E1000_RFCTL_IPV6_EX_DIS
|
2373 E1000_RFCTL_NEW_IPV6_EXT_DIS
);
2377 /* Enable Packet split descriptors */
2378 rctl
|= E1000_RCTL_DTYP_PS
;
2380 psrctl
|= adapter
->rx_ps_bsize0
>>
2381 E1000_PSRCTL_BSIZE0_SHIFT
;
2383 switch (adapter
->rx_ps_pages
) {
2385 psrctl
|= PAGE_SIZE
<<
2386 E1000_PSRCTL_BSIZE3_SHIFT
;
2388 psrctl
|= PAGE_SIZE
<<
2389 E1000_PSRCTL_BSIZE2_SHIFT
;
2391 psrctl
|= PAGE_SIZE
>>
2392 E1000_PSRCTL_BSIZE1_SHIFT
;
2396 ew32(PSRCTL
, psrctl
);
2400 /* just started the receive unit, no need to restart */
2401 adapter
->flags
&= ~FLAG_RX_RESTART_NOW
;
2405 * e1000_configure_rx - Configure Receive Unit after Reset
2406 * @adapter: board private structure
2408 * Configure the Rx unit of the MAC after a reset.
2410 static void e1000_configure_rx(struct e1000_adapter
*adapter
)
2412 struct e1000_hw
*hw
= &adapter
->hw
;
2413 struct e1000_ring
*rx_ring
= adapter
->rx_ring
;
2415 u32 rdlen
, rctl
, rxcsum
, ctrl_ext
;
2417 if (adapter
->rx_ps_pages
) {
2418 /* this is a 32 byte descriptor */
2419 rdlen
= rx_ring
->count
*
2420 sizeof(union e1000_rx_desc_packet_split
);
2421 adapter
->clean_rx
= e1000_clean_rx_irq_ps
;
2422 adapter
->alloc_rx_buf
= e1000_alloc_rx_buffers_ps
;
2423 } else if (adapter
->netdev
->mtu
> ETH_FRAME_LEN
+ ETH_FCS_LEN
) {
2424 rdlen
= rx_ring
->count
* sizeof(struct e1000_rx_desc
);
2425 adapter
->clean_rx
= e1000_clean_jumbo_rx_irq
;
2426 adapter
->alloc_rx_buf
= e1000_alloc_jumbo_rx_buffers
;
2428 rdlen
= rx_ring
->count
* sizeof(struct e1000_rx_desc
);
2429 adapter
->clean_rx
= e1000_clean_rx_irq
;
2430 adapter
->alloc_rx_buf
= e1000_alloc_rx_buffers
;
2433 /* disable receives while setting up the descriptors */
2435 ew32(RCTL
, rctl
& ~E1000_RCTL_EN
);
2439 /* set the Receive Delay Timer Register */
2440 ew32(RDTR
, adapter
->rx_int_delay
);
2442 /* irq moderation */
2443 ew32(RADV
, adapter
->rx_abs_int_delay
);
2444 if (adapter
->itr_setting
!= 0)
2445 ew32(ITR
, 1000000000 / (adapter
->itr
* 256));
2447 ctrl_ext
= er32(CTRL_EXT
);
2448 /* Reset delay timers after every interrupt */
2449 ctrl_ext
|= E1000_CTRL_EXT_INT_TIMER_CLR
;
2450 /* Auto-Mask interrupts upon ICR access */
2451 ctrl_ext
|= E1000_CTRL_EXT_IAME
;
2452 ew32(IAM
, 0xffffffff);
2453 ew32(CTRL_EXT
, ctrl_ext
);
2457 * Setup the HW Rx Head and Tail Descriptor Pointers and
2458 * the Base and Length of the Rx Descriptor Ring
2460 rdba
= rx_ring
->dma
;
2461 ew32(RDBAL
, (rdba
& DMA_32BIT_MASK
));
2462 ew32(RDBAH
, (rdba
>> 32));
2466 rx_ring
->head
= E1000_RDH
;
2467 rx_ring
->tail
= E1000_RDT
;
2469 /* Enable Receive Checksum Offload for TCP and UDP */
2470 rxcsum
= er32(RXCSUM
);
2471 if (adapter
->flags
& FLAG_RX_CSUM_ENABLED
) {
2472 rxcsum
|= E1000_RXCSUM_TUOFL
;
2475 * IPv4 payload checksum for UDP fragments must be
2476 * used in conjunction with packet-split.
2478 if (adapter
->rx_ps_pages
)
2479 rxcsum
|= E1000_RXCSUM_IPPCSE
;
2481 rxcsum
&= ~E1000_RXCSUM_TUOFL
;
2482 /* no need to clear IPPCSE as it defaults to 0 */
2484 ew32(RXCSUM
, rxcsum
);
2487 * Enable early receives on supported devices, only takes effect when
2488 * packet size is equal or larger than the specified value (in 8 byte
2489 * units), e.g. using jumbo frames when setting to E1000_ERT_2048
2491 if ((adapter
->flags
& FLAG_HAS_ERT
) &&
2492 (adapter
->netdev
->mtu
> ETH_DATA_LEN
)) {
2493 u32 rxdctl
= er32(RXDCTL(0));
2494 ew32(RXDCTL(0), rxdctl
| 0x3);
2495 ew32(ERT
, E1000_ERT_2048
| (1 << 13));
2497 * With jumbo frames and early-receive enabled, excessive
2498 * C4->C2 latencies result in dropped transactions.
2500 pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY
,
2501 e1000e_driver_name
, 55);
2503 pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY
,
2505 PM_QOS_DEFAULT_VALUE
);
2508 /* Enable Receives */
2513 * e1000_update_mc_addr_list - Update Multicast addresses
2514 * @hw: pointer to the HW structure
2515 * @mc_addr_list: array of multicast addresses to program
2516 * @mc_addr_count: number of multicast addresses to program
2517 * @rar_used_count: the first RAR register free to program
2518 * @rar_count: total number of supported Receive Address Registers
2520 * Updates the Receive Address Registers and Multicast Table Array.
2521 * The caller must have a packed mc_addr_list of multicast addresses.
2522 * The parameter rar_count will usually be hw->mac.rar_entry_count
2523 * unless there are workarounds that change this. Currently no func pointer
2524 * exists and all implementations are handled in the generic version of this
2527 static void e1000_update_mc_addr_list(struct e1000_hw
*hw
, u8
*mc_addr_list
,
2528 u32 mc_addr_count
, u32 rar_used_count
,
2531 hw
->mac
.ops
.update_mc_addr_list(hw
, mc_addr_list
, mc_addr_count
,
2532 rar_used_count
, rar_count
);
2536 * e1000_set_multi - Multicast and Promiscuous mode set
2537 * @netdev: network interface device structure
2539 * The set_multi entry point is called whenever the multicast address
2540 * list or the network interface flags are updated. This routine is
2541 * responsible for configuring the hardware for proper multicast,
2542 * promiscuous mode, and all-multi behavior.
2544 static void e1000_set_multi(struct net_device
*netdev
)
2546 struct e1000_adapter
*adapter
= netdev_priv(netdev
);
2547 struct e1000_hw
*hw
= &adapter
->hw
;
2548 struct e1000_mac_info
*mac
= &hw
->mac
;
2549 struct dev_mc_list
*mc_ptr
;
2554 /* Check for Promiscuous and All Multicast modes */
2558 if (netdev
->flags
& IFF_PROMISC
) {
2559 rctl
|= (E1000_RCTL_UPE
| E1000_RCTL_MPE
);
2560 rctl
&= ~E1000_RCTL_VFE
;
2562 if (netdev
->flags
& IFF_ALLMULTI
) {
2563 rctl
|= E1000_RCTL_MPE
;
2564 rctl
&= ~E1000_RCTL_UPE
;
2566 rctl
&= ~(E1000_RCTL_UPE
| E1000_RCTL_MPE
);
2568 if (adapter
->flags
& FLAG_HAS_HW_VLAN_FILTER
)
2569 rctl
|= E1000_RCTL_VFE
;
2574 if (netdev
->mc_count
) {
2575 mta_list
= kmalloc(netdev
->mc_count
* 6, GFP_ATOMIC
);
2579 /* prepare a packed array of only addresses. */
2580 mc_ptr
= netdev
->mc_list
;
2582 for (i
= 0; i
< netdev
->mc_count
; i
++) {
2585 memcpy(mta_list
+ (i
*ETH_ALEN
), mc_ptr
->dmi_addr
,
2587 mc_ptr
= mc_ptr
->next
;
2590 e1000_update_mc_addr_list(hw
, mta_list
, i
, 1,
2591 mac
->rar_entry_count
);
2595 * if we're called from probe, we might not have
2596 * anything to do here, so clear out the list
2598 e1000_update_mc_addr_list(hw
, NULL
, 0, 1, mac
->rar_entry_count
);
2603 * e1000_configure - configure the hardware for Rx and Tx
2604 * @adapter: private board structure
2606 static void e1000_configure(struct e1000_adapter
*adapter
)
2608 e1000_set_multi(adapter
->netdev
);
2610 e1000_restore_vlan(adapter
);
2611 e1000_init_manageability(adapter
);
2613 e1000_configure_tx(adapter
);
2614 e1000_setup_rctl(adapter
);
2615 e1000_configure_rx(adapter
);
2616 adapter
->alloc_rx_buf(adapter
, e1000_desc_unused(adapter
->rx_ring
));
2620 * e1000e_power_up_phy - restore link in case the phy was powered down
2621 * @adapter: address of board private structure
2623 * The phy may be powered down to save power and turn off link when the
2624 * driver is unloaded and wake on lan is not enabled (among others)
2625 * *** this routine MUST be followed by a call to e1000e_reset ***
2627 void e1000e_power_up_phy(struct e1000_adapter
*adapter
)
2631 /* Just clear the power down bit to wake the phy back up */
2632 if (adapter
->hw
.phy
.media_type
== e1000_media_type_copper
) {
2634 * According to the manual, the phy will retain its
2635 * settings across a power-down/up cycle
2637 e1e_rphy(&adapter
->hw
, PHY_CONTROL
, &mii_reg
);
2638 mii_reg
&= ~MII_CR_POWER_DOWN
;
2639 e1e_wphy(&adapter
->hw
, PHY_CONTROL
, mii_reg
);
2642 adapter
->hw
.mac
.ops
.setup_link(&adapter
->hw
);
2646 * e1000_power_down_phy - Power down the PHY
2648 * Power down the PHY so no link is implied when interface is down
2649 * The PHY cannot be powered down is management or WoL is active
2651 static void e1000_power_down_phy(struct e1000_adapter
*adapter
)
2653 struct e1000_hw
*hw
= &adapter
->hw
;
2656 /* WoL is enabled */
2660 /* non-copper PHY? */
2661 if (adapter
->hw
.phy
.media_type
!= e1000_media_type_copper
)
2664 /* reset is blocked because of a SoL/IDER session */
2665 if (e1000e_check_mng_mode(hw
) || e1000_check_reset_block(hw
))
2668 /* manageability (AMT) is enabled */
2669 if (er32(MANC
) & E1000_MANC_SMBUS_EN
)
2672 /* power down the PHY */
2673 e1e_rphy(hw
, PHY_CONTROL
, &mii_reg
);
2674 mii_reg
|= MII_CR_POWER_DOWN
;
2675 e1e_wphy(hw
, PHY_CONTROL
, mii_reg
);
2680 * e1000e_reset - bring the hardware into a known good state
2682 * This function boots the hardware and enables some settings that
2683 * require a configuration cycle of the hardware - those cannot be
2684 * set/changed during runtime. After reset the device needs to be
2685 * properly configured for Rx, Tx etc.
2687 void e1000e_reset(struct e1000_adapter
*adapter
)
2689 struct e1000_mac_info
*mac
= &adapter
->hw
.mac
;
2690 struct e1000_fc_info
*fc
= &adapter
->hw
.fc
;
2691 struct e1000_hw
*hw
= &adapter
->hw
;
2692 u32 tx_space
, min_tx_space
, min_rx_space
;
2693 u32 pba
= adapter
->pba
;
2696 /* reset Packet Buffer Allocation to default */
2699 if (adapter
->max_frame_size
> ETH_FRAME_LEN
+ ETH_FCS_LEN
) {
2701 * To maintain wire speed transmits, the Tx FIFO should be
2702 * large enough to accommodate two full transmit packets,
2703 * rounded up to the next 1KB and expressed in KB. Likewise,
2704 * the Rx FIFO should be large enough to accommodate at least
2705 * one full receive packet and is similarly rounded up and
2709 /* upper 16 bits has Tx packet buffer allocation size in KB */
2710 tx_space
= pba
>> 16;
2711 /* lower 16 bits has Rx packet buffer allocation size in KB */
2714 * the Tx fifo also stores 16 bytes of information about the tx
2715 * but don't include ethernet FCS because hardware appends it
2717 min_tx_space
= (adapter
->max_frame_size
+
2718 sizeof(struct e1000_tx_desc
) -
2720 min_tx_space
= ALIGN(min_tx_space
, 1024);
2721 min_tx_space
>>= 10;
2722 /* software strips receive CRC, so leave room for it */
2723 min_rx_space
= adapter
->max_frame_size
;
2724 min_rx_space
= ALIGN(min_rx_space
, 1024);
2725 min_rx_space
>>= 10;
2728 * If current Tx allocation is less than the min Tx FIFO size,
2729 * and the min Tx FIFO size is less than the current Rx FIFO
2730 * allocation, take space away from current Rx allocation
2732 if ((tx_space
< min_tx_space
) &&
2733 ((min_tx_space
- tx_space
) < pba
)) {
2734 pba
-= min_tx_space
- tx_space
;
2737 * if short on Rx space, Rx wins and must trump tx
2738 * adjustment or use Early Receive if available
2740 if ((pba
< min_rx_space
) &&
2741 (!(adapter
->flags
& FLAG_HAS_ERT
)))
2742 /* ERT enabled in e1000_configure_rx */
2751 * flow control settings
2753 * The high water mark must be low enough to fit one full frame
2754 * (or the size used for early receive) above it in the Rx FIFO.
2755 * Set it to the lower of:
2756 * - 90% of the Rx FIFO size, and
2757 * - the full Rx FIFO size minus the early receive size (for parts
2758 * with ERT support assuming ERT set to E1000_ERT_2048), or
2759 * - the full Rx FIFO size minus one full frame
2761 if (adapter
->flags
& FLAG_HAS_ERT
)
2762 hwm
= min(((pba
<< 10) * 9 / 10),
2763 ((pba
<< 10) - (E1000_ERT_2048
<< 3)));
2765 hwm
= min(((pba
<< 10) * 9 / 10),
2766 ((pba
<< 10) - adapter
->max_frame_size
));
2768 fc
->high_water
= hwm
& 0xFFF8; /* 8-byte granularity */
2769 fc
->low_water
= fc
->high_water
- 8;
2771 if (adapter
->flags
& FLAG_DISABLE_FC_PAUSE_TIME
)
2772 fc
->pause_time
= 0xFFFF;
2774 fc
->pause_time
= E1000_FC_PAUSE_TIME
;
2776 fc
->current_mode
= fc
->requested_mode
;
2778 /* Allow time for pending master requests to run */
2779 mac
->ops
.reset_hw(hw
);
2782 * For parts with AMT enabled, let the firmware know
2783 * that the network interface is in control
2785 if (adapter
->flags
& FLAG_HAS_AMT
)
2786 e1000_get_hw_control(adapter
);
2790 if (mac
->ops
.init_hw(hw
))
2791 e_err("Hardware Error\n");
2793 e1000_update_mng_vlan(adapter
);
2795 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
2796 ew32(VET
, ETH_P_8021Q
);
2798 e1000e_reset_adaptive(hw
);
2799 e1000_get_phy_info(hw
);
2801 if (!(adapter
->flags
& FLAG_SMART_POWER_DOWN
)) {
2804 * speed up time to link by disabling smart power down, ignore
2805 * the return value of this function because there is nothing
2806 * different we would do if it failed
2808 e1e_rphy(hw
, IGP02E1000_PHY_POWER_MGMT
, &phy_data
);
2809 phy_data
&= ~IGP02E1000_PM_SPD
;
2810 e1e_wphy(hw
, IGP02E1000_PHY_POWER_MGMT
, phy_data
);
2814 int e1000e_up(struct e1000_adapter
*adapter
)
2816 struct e1000_hw
*hw
= &adapter
->hw
;
2818 /* hardware has been reset, we need to reload some things */
2819 e1000_configure(adapter
);
2821 clear_bit(__E1000_DOWN
, &adapter
->state
);
2823 napi_enable(&adapter
->napi
);
2824 if (adapter
->msix_entries
)
2825 e1000_configure_msix(adapter
);
2826 e1000_irq_enable(adapter
);
2828 /* fire a link change interrupt to start the watchdog */
2829 ew32(ICS
, E1000_ICS_LSC
);
2833 void e1000e_down(struct e1000_adapter
*adapter
)
2835 struct net_device
*netdev
= adapter
->netdev
;
2836 struct e1000_hw
*hw
= &adapter
->hw
;
2840 * signal that we're down so the interrupt handler does not
2841 * reschedule our watchdog timer
2843 set_bit(__E1000_DOWN
, &adapter
->state
);
2845 /* disable receives in the hardware */
2847 ew32(RCTL
, rctl
& ~E1000_RCTL_EN
);
2848 /* flush and sleep below */
2850 netif_tx_stop_all_queues(netdev
);
2852 /* disable transmits in the hardware */
2854 tctl
&= ~E1000_TCTL_EN
;
2856 /* flush both disables and wait for them to finish */
2860 napi_disable(&adapter
->napi
);
2861 e1000_irq_disable(adapter
);
2863 del_timer_sync(&adapter
->watchdog_timer
);
2864 del_timer_sync(&adapter
->phy_info_timer
);
2866 netdev
->tx_queue_len
= adapter
->tx_queue_len
;
2867 netif_carrier_off(netdev
);
2868 adapter
->link_speed
= 0;
2869 adapter
->link_duplex
= 0;
2871 if (!pci_channel_offline(adapter
->pdev
))
2872 e1000e_reset(adapter
);
2873 e1000_clean_tx_ring(adapter
);
2874 e1000_clean_rx_ring(adapter
);
2877 * TODO: for power management, we could drop the link and
2878 * pci_disable_device here.
2882 void e1000e_reinit_locked(struct e1000_adapter
*adapter
)
2885 while (test_and_set_bit(__E1000_RESETTING
, &adapter
->state
))
2887 e1000e_down(adapter
);
2889 clear_bit(__E1000_RESETTING
, &adapter
->state
);
2893 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
2894 * @adapter: board private structure to initialize
2896 * e1000_sw_init initializes the Adapter private data structure.
2897 * Fields are initialized based on PCI device information and
2898 * OS network device settings (MTU size).
2900 static int __devinit
e1000_sw_init(struct e1000_adapter
*adapter
)
2902 struct net_device
*netdev
= adapter
->netdev
;
2904 adapter
->rx_buffer_len
= ETH_FRAME_LEN
+ VLAN_HLEN
+ ETH_FCS_LEN
;
2905 adapter
->rx_ps_bsize0
= 128;
2906 adapter
->max_frame_size
= netdev
->mtu
+ ETH_HLEN
+ ETH_FCS_LEN
;
2907 adapter
->min_frame_size
= ETH_ZLEN
+ ETH_FCS_LEN
;
2909 e1000e_set_interrupt_capability(adapter
);
2911 if (e1000_alloc_queues(adapter
))
2914 /* Explicitly disable IRQ since the NIC can be in any state. */
2915 e1000_irq_disable(adapter
);
2917 set_bit(__E1000_DOWN
, &adapter
->state
);
2922 * e1000_intr_msi_test - Interrupt Handler
2923 * @irq: interrupt number
2924 * @data: pointer to a network interface device structure
2926 static irqreturn_t
e1000_intr_msi_test(int irq
, void *data
)
2928 struct net_device
*netdev
= data
;
2929 struct e1000_adapter
*adapter
= netdev_priv(netdev
);
2930 struct e1000_hw
*hw
= &adapter
->hw
;
2931 u32 icr
= er32(ICR
);
2933 e_dbg("%s: icr is %08X\n", netdev
->name
, icr
);
2934 if (icr
& E1000_ICR_RXSEQ
) {
2935 adapter
->flags
&= ~FLAG_MSI_TEST_FAILED
;
2943 * e1000_test_msi_interrupt - Returns 0 for successful test
2944 * @adapter: board private struct
2946 * code flow taken from tg3.c
2948 static int e1000_test_msi_interrupt(struct e1000_adapter
*adapter
)
2950 struct net_device
*netdev
= adapter
->netdev
;
2951 struct e1000_hw
*hw
= &adapter
->hw
;
2954 /* poll_enable hasn't been called yet, so don't need disable */
2955 /* clear any pending events */
2958 /* free the real vector and request a test handler */
2959 e1000_free_irq(adapter
);
2960 e1000e_reset_interrupt_capability(adapter
);
2962 /* Assume that the test fails, if it succeeds then the test
2963 * MSI irq handler will unset this flag */
2964 adapter
->flags
|= FLAG_MSI_TEST_FAILED
;
2966 err
= pci_enable_msi(adapter
->pdev
);
2968 goto msi_test_failed
;
2970 err
= request_irq(adapter
->pdev
->irq
, &e1000_intr_msi_test
, 0,
2971 netdev
->name
, netdev
);
2973 pci_disable_msi(adapter
->pdev
);
2974 goto msi_test_failed
;
2979 e1000_irq_enable(adapter
);
2981 /* fire an unusual interrupt on the test handler */
2982 ew32(ICS
, E1000_ICS_RXSEQ
);
2986 e1000_irq_disable(adapter
);
2990 if (adapter
->flags
& FLAG_MSI_TEST_FAILED
) {
2991 adapter
->int_mode
= E1000E_INT_MODE_LEGACY
;
2993 e_info("MSI interrupt test failed!\n");
2996 free_irq(adapter
->pdev
->irq
, netdev
);
2997 pci_disable_msi(adapter
->pdev
);
3000 goto msi_test_failed
;
3002 /* okay so the test worked, restore settings */
3003 e_dbg("%s: MSI interrupt test succeeded!\n", netdev
->name
);
3005 e1000e_set_interrupt_capability(adapter
);
3006 e1000_request_irq(adapter
);
3011 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
3012 * @adapter: board private struct
3014 * code flow taken from tg3.c, called with e1000 interrupts disabled.
3016 static int e1000_test_msi(struct e1000_adapter
*adapter
)
3021 if (!(adapter
->flags
& FLAG_MSI_ENABLED
))
3024 /* disable SERR in case the MSI write causes a master abort */
3025 pci_read_config_word(adapter
->pdev
, PCI_COMMAND
, &pci_cmd
);
3026 pci_write_config_word(adapter
->pdev
, PCI_COMMAND
,
3027 pci_cmd
& ~PCI_COMMAND_SERR
);
3029 err
= e1000_test_msi_interrupt(adapter
);
3031 /* restore previous setting of command word */
3032 pci_write_config_word(adapter
->pdev
, PCI_COMMAND
, pci_cmd
);
3038 /* EIO means MSI test failed */
3042 /* back to INTx mode */
3043 e_warn("MSI interrupt test failed, using legacy interrupt.\n");
3045 e1000_free_irq(adapter
);
3047 err
= e1000_request_irq(adapter
);
3053 * e1000_open - Called when a network interface is made active
3054 * @netdev: network interface device structure
3056 * Returns 0 on success, negative value on failure
3058 * The open entry point is called when a network interface is made
3059 * active by the system (IFF_UP). At this point all resources needed
3060 * for transmit and receive operations are allocated, the interrupt
3061 * handler is registered with the OS, the watchdog timer is started,
3062 * and the stack is notified that the interface is ready.
3064 static int e1000_open(struct net_device
*netdev
)
3066 struct e1000_adapter
*adapter
= netdev_priv(netdev
);
3067 struct e1000_hw
*hw
= &adapter
->hw
;
3070 /* disallow open during test */
3071 if (test_bit(__E1000_TESTING
, &adapter
->state
))
3074 /* allocate transmit descriptors */
3075 err
= e1000e_setup_tx_resources(adapter
);
3079 /* allocate receive descriptors */
3080 err
= e1000e_setup_rx_resources(adapter
);
3084 e1000e_power_up_phy(adapter
);
3086 adapter
->mng_vlan_id
= E1000_MNG_VLAN_NONE
;
3087 if ((adapter
->hw
.mng_cookie
.status
&
3088 E1000_MNG_DHCP_COOKIE_STATUS_VLAN
))
3089 e1000_update_mng_vlan(adapter
);
3092 * If AMT is enabled, let the firmware know that the network
3093 * interface is now open
3095 if (adapter
->flags
& FLAG_HAS_AMT
)
3096 e1000_get_hw_control(adapter
);
3099 * before we allocate an interrupt, we must be ready to handle it.
3100 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
3101 * as soon as we call pci_request_irq, so we have to setup our
3102 * clean_rx handler before we do so.
3104 e1000_configure(adapter
);
3106 err
= e1000_request_irq(adapter
);
3111 * Work around PCIe errata with MSI interrupts causing some chipsets to
3112 * ignore e1000e MSI messages, which means we need to test our MSI
3115 if (adapter
->int_mode
!= E1000E_INT_MODE_LEGACY
) {
3116 err
= e1000_test_msi(adapter
);
3118 e_err("Interrupt allocation failed\n");
3123 /* From here on the code is the same as e1000e_up() */
3124 clear_bit(__E1000_DOWN
, &adapter
->state
);
3126 napi_enable(&adapter
->napi
);
3128 e1000_irq_enable(adapter
);
3130 netif_tx_start_all_queues(netdev
);
3132 /* fire a link status change interrupt to start the watchdog */
3133 ew32(ICS
, E1000_ICS_LSC
);
3138 e1000_release_hw_control(adapter
);
3139 e1000_power_down_phy(adapter
);
3140 e1000e_free_rx_resources(adapter
);
3142 e1000e_free_tx_resources(adapter
);
3144 e1000e_reset(adapter
);
3150 * e1000_close - Disables a network interface
3151 * @netdev: network interface device structure
3153 * Returns 0, this is not allowed to fail
3155 * The close entry point is called when an interface is de-activated
3156 * by the OS. The hardware is still under the drivers control, but
3157 * needs to be disabled. A global MAC reset is issued to stop the
3158 * hardware, and all transmit and receive resources are freed.
3160 static int e1000_close(struct net_device
*netdev
)
3162 struct e1000_adapter
*adapter
= netdev_priv(netdev
);
3164 WARN_ON(test_bit(__E1000_RESETTING
, &adapter
->state
));
3165 e1000e_down(adapter
);
3166 e1000_power_down_phy(adapter
);
3167 e1000_free_irq(adapter
);
3169 e1000e_free_tx_resources(adapter
);
3170 e1000e_free_rx_resources(adapter
);
3173 * kill manageability vlan ID if supported, but not if a vlan with
3174 * the same ID is registered on the host OS (let 8021q kill it)
3176 if ((adapter
->hw
.mng_cookie
.status
&
3177 E1000_MNG_DHCP_COOKIE_STATUS_VLAN
) &&
3179 vlan_group_get_device(adapter
->vlgrp
, adapter
->mng_vlan_id
)))
3180 e1000_vlan_rx_kill_vid(netdev
, adapter
->mng_vlan_id
);
3183 * If AMT is enabled, let the firmware know that the network
3184 * interface is now closed
3186 if (adapter
->flags
& FLAG_HAS_AMT
)
3187 e1000_release_hw_control(adapter
);
3192 * e1000_set_mac - Change the Ethernet Address of the NIC
3193 * @netdev: network interface device structure
3194 * @p: pointer to an address structure
3196 * Returns 0 on success, negative on failure
3198 static int e1000_set_mac(struct net_device
*netdev
, void *p
)
3200 struct e1000_adapter
*adapter
= netdev_priv(netdev
);
3201 struct sockaddr
*addr
= p
;
3203 if (!is_valid_ether_addr(addr
->sa_data
))
3204 return -EADDRNOTAVAIL
;
3206 memcpy(netdev
->dev_addr
, addr
->sa_data
, netdev
->addr_len
);
3207 memcpy(adapter
->hw
.mac
.addr
, addr
->sa_data
, netdev
->addr_len
);
3209 e1000e_rar_set(&adapter
->hw
, adapter
->hw
.mac
.addr
, 0);
3211 if (adapter
->flags
& FLAG_RESET_OVERWRITES_LAA
) {
3212 /* activate the work around */
3213 e1000e_set_laa_state_82571(&adapter
->hw
, 1);
3216 * Hold a copy of the LAA in RAR[14] This is done so that
3217 * between the time RAR[0] gets clobbered and the time it
3218 * gets fixed (in e1000_watchdog), the actual LAA is in one
3219 * of the RARs and no incoming packets directed to this port
3220 * are dropped. Eventually the LAA will be in RAR[0] and
3223 e1000e_rar_set(&adapter
->hw
,
3224 adapter
->hw
.mac
.addr
,
3225 adapter
->hw
.mac
.rar_entry_count
- 1);
3232 * e1000e_update_phy_task - work thread to update phy
3233 * @work: pointer to our work struct
3235 * this worker thread exists because we must acquire a
3236 * semaphore to read the phy, which we could msleep while
3237 * waiting for it, and we can't msleep in a timer.
3239 static void e1000e_update_phy_task(struct work_struct
*work
)
3241 struct e1000_adapter
*adapter
= container_of(work
,
3242 struct e1000_adapter
, update_phy_task
);
3243 e1000_get_phy_info(&adapter
->hw
);
3247 * Need to wait a few seconds after link up to get diagnostic information from
3250 static void e1000_update_phy_info(unsigned long data
)
3252 struct e1000_adapter
*adapter
= (struct e1000_adapter
*) data
;
3253 schedule_work(&adapter
->update_phy_task
);
3257 * e1000e_update_stats - Update the board statistics counters
3258 * @adapter: board private structure
3260 void e1000e_update_stats(struct e1000_adapter
*adapter
)
3262 struct e1000_hw
*hw
= &adapter
->hw
;
3263 struct pci_dev
*pdev
= adapter
->pdev
;
3266 * Prevent stats update while adapter is being reset, or if the pci
3267 * connection is down.
3269 if (adapter
->link_speed
== 0)
3271 if (pci_channel_offline(pdev
))
3274 adapter
->stats
.crcerrs
+= er32(CRCERRS
);
3275 adapter
->stats
.gprc
+= er32(GPRC
);
3276 adapter
->stats
.gorc
+= er32(GORCL
);
3277 er32(GORCH
); /* Clear gorc */
3278 adapter
->stats
.bprc
+= er32(BPRC
);
3279 adapter
->stats
.mprc
+= er32(MPRC
);
3280 adapter
->stats
.roc
+= er32(ROC
);
3282 adapter
->stats
.mpc
+= er32(MPC
);
3283 adapter
->stats
.scc
+= er32(SCC
);
3284 adapter
->stats
.ecol
+= er32(ECOL
);
3285 adapter
->stats
.mcc
+= er32(MCC
);
3286 adapter
->stats
.latecol
+= er32(LATECOL
);
3287 adapter
->stats
.dc
+= er32(DC
);
3288 adapter
->stats
.xonrxc
+= er32(XONRXC
);
3289 adapter
->stats
.xontxc
+= er32(XONTXC
);
3290 adapter
->stats
.xoffrxc
+= er32(XOFFRXC
);
3291 adapter
->stats
.xofftxc
+= er32(XOFFTXC
);
3292 adapter
->stats
.gptc
+= er32(GPTC
);
3293 adapter
->stats
.gotc
+= er32(GOTCL
);
3294 er32(GOTCH
); /* Clear gotc */
3295 adapter
->stats
.rnbc
+= er32(RNBC
);
3296 adapter
->stats
.ruc
+= er32(RUC
);
3298 adapter
->stats
.mptc
+= er32(MPTC
);
3299 adapter
->stats
.bptc
+= er32(BPTC
);
3301 /* used for adaptive IFS */
3303 hw
->mac
.tx_packet_delta
= er32(TPT
);
3304 adapter
->stats
.tpt
+= hw
->mac
.tx_packet_delta
;
3305 hw
->mac
.collision_delta
= er32(COLC
);
3306 adapter
->stats
.colc
+= hw
->mac
.collision_delta
;
3308 adapter
->stats
.algnerrc
+= er32(ALGNERRC
);
3309 adapter
->stats
.rxerrc
+= er32(RXERRC
);
3310 if ((hw
->mac
.type
!= e1000_82574
) && (hw
->mac
.type
!= e1000_82583
))
3311 adapter
->stats
.tncrs
+= er32(TNCRS
);
3312 adapter
->stats
.cexterr
+= er32(CEXTERR
);
3313 adapter
->stats
.tsctc
+= er32(TSCTC
);
3314 adapter
->stats
.tsctfc
+= er32(TSCTFC
);
3316 /* Fill out the OS statistics structure */
3317 adapter
->net_stats
.multicast
= adapter
->stats
.mprc
;
3318 adapter
->net_stats
.collisions
= adapter
->stats
.colc
;
3323 * RLEC on some newer hardware can be incorrect so build
3324 * our own version based on RUC and ROC
3326 adapter
->net_stats
.rx_errors
= adapter
->stats
.rxerrc
+
3327 adapter
->stats
.crcerrs
+ adapter
->stats
.algnerrc
+
3328 adapter
->stats
.ruc
+ adapter
->stats
.roc
+
3329 adapter
->stats
.cexterr
;
3330 adapter
->net_stats
.rx_length_errors
= adapter
->stats
.ruc
+
3332 adapter
->net_stats
.rx_crc_errors
= adapter
->stats
.crcerrs
;
3333 adapter
->net_stats
.rx_frame_errors
= adapter
->stats
.algnerrc
;
3334 adapter
->net_stats
.rx_missed_errors
= adapter
->stats
.mpc
;
3337 adapter
->net_stats
.tx_errors
= adapter
->stats
.ecol
+
3338 adapter
->stats
.latecol
;
3339 adapter
->net_stats
.tx_aborted_errors
= adapter
->stats
.ecol
;
3340 adapter
->net_stats
.tx_window_errors
= adapter
->stats
.latecol
;
3341 adapter
->net_stats
.tx_carrier_errors
= adapter
->stats
.tncrs
;
3343 /* Tx Dropped needs to be maintained elsewhere */
3345 /* Management Stats */
3346 adapter
->stats
.mgptc
+= er32(MGTPTC
);
3347 adapter
->stats
.mgprc
+= er32(MGTPRC
);
3348 adapter
->stats
.mgpdc
+= er32(MGTPDC
);
3352 * e1000_phy_read_status - Update the PHY register status snapshot
3353 * @adapter: board private structure
3355 static void e1000_phy_read_status(struct e1000_adapter
*adapter
)
3357 struct e1000_hw
*hw
= &adapter
->hw
;
3358 struct e1000_phy_regs
*phy
= &adapter
->phy_regs
;
3361 if ((er32(STATUS
) & E1000_STATUS_LU
) &&
3362 (adapter
->hw
.phy
.media_type
== e1000_media_type_copper
)) {
3363 ret_val
= e1e_rphy(hw
, PHY_CONTROL
, &phy
->bmcr
);
3364 ret_val
|= e1e_rphy(hw
, PHY_STATUS
, &phy
->bmsr
);
3365 ret_val
|= e1e_rphy(hw
, PHY_AUTONEG_ADV
, &phy
->advertise
);
3366 ret_val
|= e1e_rphy(hw
, PHY_LP_ABILITY
, &phy
->lpa
);
3367 ret_val
|= e1e_rphy(hw
, PHY_AUTONEG_EXP
, &phy
->expansion
);
3368 ret_val
|= e1e_rphy(hw
, PHY_1000T_CTRL
, &phy
->ctrl1000
);
3369 ret_val
|= e1e_rphy(hw
, PHY_1000T_STATUS
, &phy
->stat1000
);
3370 ret_val
|= e1e_rphy(hw
, PHY_EXT_STATUS
, &phy
->estatus
);
3372 e_warn("Error reading PHY register\n");
3375 * Do not read PHY registers if link is not up
3376 * Set values to typical power-on defaults
3378 phy
->bmcr
= (BMCR_SPEED1000
| BMCR_ANENABLE
| BMCR_FULLDPLX
);
3379 phy
->bmsr
= (BMSR_100FULL
| BMSR_100HALF
| BMSR_10FULL
|
3380 BMSR_10HALF
| BMSR_ESTATEN
| BMSR_ANEGCAPABLE
|
3382 phy
->advertise
= (ADVERTISE_PAUSE_ASYM
| ADVERTISE_PAUSE_CAP
|
3383 ADVERTISE_ALL
| ADVERTISE_CSMA
);
3385 phy
->expansion
= EXPANSION_ENABLENPAGE
;
3386 phy
->ctrl1000
= ADVERTISE_1000FULL
;
3388 phy
->estatus
= (ESTATUS_1000_TFULL
| ESTATUS_1000_THALF
);
3392 static void e1000_print_link_info(struct e1000_adapter
*adapter
)
3394 struct e1000_hw
*hw
= &adapter
->hw
;
3395 u32 ctrl
= er32(CTRL
);
3397 /* Link status message must follow this format for user tools */
3398 printk(KERN_INFO
"e1000e: %s NIC Link is Up %d Mbps %s, "
3399 "Flow Control: %s\n",
3400 adapter
->netdev
->name
,
3401 adapter
->link_speed
,
3402 (adapter
->link_duplex
== FULL_DUPLEX
) ?
3403 "Full Duplex" : "Half Duplex",
3404 ((ctrl
& E1000_CTRL_TFCE
) && (ctrl
& E1000_CTRL_RFCE
)) ?
3406 ((ctrl
& E1000_CTRL_RFCE
) ? "RX" :
3407 ((ctrl
& E1000_CTRL_TFCE
) ? "TX" : "None" )));
3410 bool e1000_has_link(struct e1000_adapter
*adapter
)
3412 struct e1000_hw
*hw
= &adapter
->hw
;
3413 bool link_active
= 0;
3417 * get_link_status is set on LSC (link status) interrupt or
3418 * Rx sequence error interrupt. get_link_status will stay
3419 * false until the check_for_link establishes link
3420 * for copper adapters ONLY
3422 switch (hw
->phy
.media_type
) {
3423 case e1000_media_type_copper
:
3424 if (hw
->mac
.get_link_status
) {
3425 ret_val
= hw
->mac
.ops
.check_for_link(hw
);
3426 link_active
= !hw
->mac
.get_link_status
;
3431 case e1000_media_type_fiber
:
3432 ret_val
= hw
->mac
.ops
.check_for_link(hw
);
3433 link_active
= !!(er32(STATUS
) & E1000_STATUS_LU
);
3435 case e1000_media_type_internal_serdes
:
3436 ret_val
= hw
->mac
.ops
.check_for_link(hw
);
3437 link_active
= adapter
->hw
.mac
.serdes_has_link
;
3440 case e1000_media_type_unknown
:
3444 if ((ret_val
== E1000_ERR_PHY
) && (hw
->phy
.type
== e1000_phy_igp_3
) &&
3445 (er32(CTRL
) & E1000_PHY_CTRL_GBE_DISABLE
)) {
3446 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
3447 e_info("Gigabit has been disabled, downgrading speed\n");
3453 static void e1000e_enable_receives(struct e1000_adapter
*adapter
)
3455 /* make sure the receive unit is started */
3456 if ((adapter
->flags
& FLAG_RX_NEEDS_RESTART
) &&
3457 (adapter
->flags
& FLAG_RX_RESTART_NOW
)) {
3458 struct e1000_hw
*hw
= &adapter
->hw
;
3459 u32 rctl
= er32(RCTL
);
3460 ew32(RCTL
, rctl
| E1000_RCTL_EN
);
3461 adapter
->flags
&= ~FLAG_RX_RESTART_NOW
;
3466 * e1000_watchdog - Timer Call-back
3467 * @data: pointer to adapter cast into an unsigned long
3469 static void e1000_watchdog(unsigned long data
)
3471 struct e1000_adapter
*adapter
= (struct e1000_adapter
*) data
;
3473 /* Do the rest outside of interrupt context */
3474 schedule_work(&adapter
->watchdog_task
);
3476 /* TODO: make this use queue_delayed_work() */
3479 static void e1000_watchdog_task(struct work_struct
*work
)
3481 struct e1000_adapter
*adapter
= container_of(work
,
3482 struct e1000_adapter
, watchdog_task
);
3483 struct net_device
*netdev
= adapter
->netdev
;
3484 struct e1000_mac_info
*mac
= &adapter
->hw
.mac
;
3485 struct e1000_phy_info
*phy
= &adapter
->hw
.phy
;
3486 struct e1000_ring
*tx_ring
= adapter
->tx_ring
;
3487 struct e1000_hw
*hw
= &adapter
->hw
;
3491 link
= e1000_has_link(adapter
);
3492 if ((netif_carrier_ok(netdev
)) && link
) {
3493 e1000e_enable_receives(adapter
);
3497 if ((e1000e_enable_tx_pkt_filtering(hw
)) &&
3498 (adapter
->mng_vlan_id
!= adapter
->hw
.mng_cookie
.vlan_id
))
3499 e1000_update_mng_vlan(adapter
);
3502 if (!netif_carrier_ok(netdev
)) {
3504 /* update snapshot of PHY registers on LSC */
3505 e1000_phy_read_status(adapter
);
3506 mac
->ops
.get_link_up_info(&adapter
->hw
,
3507 &adapter
->link_speed
,
3508 &adapter
->link_duplex
);
3509 e1000_print_link_info(adapter
);
3511 * On supported PHYs, check for duplex mismatch only
3512 * if link has autonegotiated at 10/100 half
3514 if ((hw
->phy
.type
== e1000_phy_igp_3
||
3515 hw
->phy
.type
== e1000_phy_bm
) &&
3516 (hw
->mac
.autoneg
== true) &&
3517 (adapter
->link_speed
== SPEED_10
||
3518 adapter
->link_speed
== SPEED_100
) &&
3519 (adapter
->link_duplex
== HALF_DUPLEX
)) {
3522 e1e_rphy(hw
, PHY_AUTONEG_EXP
, &autoneg_exp
);
3524 if (!(autoneg_exp
& NWAY_ER_LP_NWAY_CAPS
))
3525 e_info("Autonegotiated half duplex but"
3526 " link partner cannot autoneg. "
3527 " Try forcing full duplex if "
3528 "link gets many collisions.\n");
3532 * tweak tx_queue_len according to speed/duplex
3533 * and adjust the timeout factor
3535 netdev
->tx_queue_len
= adapter
->tx_queue_len
;
3536 adapter
->tx_timeout_factor
= 1;
3537 switch (adapter
->link_speed
) {
3540 netdev
->tx_queue_len
= 10;
3541 adapter
->tx_timeout_factor
= 16;
3545 netdev
->tx_queue_len
= 100;
3546 /* maybe add some timeout factor ? */
3551 * workaround: re-program speed mode bit after
3554 if ((adapter
->flags
& FLAG_TARC_SPEED_MODE_BIT
) &&
3557 tarc0
= er32(TARC(0));
3558 tarc0
&= ~SPEED_MODE_BIT
;
3559 ew32(TARC(0), tarc0
);
3563 * disable TSO for pcie and 10/100 speeds, to avoid
3564 * some hardware issues
3566 if (!(adapter
->flags
& FLAG_TSO_FORCE
)) {
3567 switch (adapter
->link_speed
) {
3570 e_info("10/100 speed: disabling TSO\n");
3571 netdev
->features
&= ~NETIF_F_TSO
;
3572 netdev
->features
&= ~NETIF_F_TSO6
;
3575 netdev
->features
|= NETIF_F_TSO
;
3576 netdev
->features
|= NETIF_F_TSO6
;
3585 * enable transmits in the hardware, need to do this
3586 * after setting TARC(0)
3589 tctl
|= E1000_TCTL_EN
;
3593 * Perform any post-link-up configuration before
3594 * reporting link up.
3596 if (phy
->ops
.cfg_on_link_up
)
3597 phy
->ops
.cfg_on_link_up(hw
);
3599 netif_carrier_on(netdev
);
3600 netif_tx_wake_all_queues(netdev
);
3602 if (!test_bit(__E1000_DOWN
, &adapter
->state
))
3603 mod_timer(&adapter
->phy_info_timer
,
3604 round_jiffies(jiffies
+ 2 * HZ
));
3607 if (netif_carrier_ok(netdev
)) {
3608 adapter
->link_speed
= 0;
3609 adapter
->link_duplex
= 0;
3610 /* Link status message must follow this format */
3611 printk(KERN_INFO
"e1000e: %s NIC Link is Down\n",
3612 adapter
->netdev
->name
);
3613 netif_carrier_off(netdev
);
3614 netif_tx_stop_all_queues(netdev
);
3615 if (!test_bit(__E1000_DOWN
, &adapter
->state
))
3616 mod_timer(&adapter
->phy_info_timer
,
3617 round_jiffies(jiffies
+ 2 * HZ
));
3619 if (adapter
->flags
& FLAG_RX_NEEDS_RESTART
)
3620 schedule_work(&adapter
->reset_task
);
3625 e1000e_update_stats(adapter
);
3627 mac
->tx_packet_delta
= adapter
->stats
.tpt
- adapter
->tpt_old
;
3628 adapter
->tpt_old
= adapter
->stats
.tpt
;
3629 mac
->collision_delta
= adapter
->stats
.colc
- adapter
->colc_old
;
3630 adapter
->colc_old
= adapter
->stats
.colc
;
3632 adapter
->gorc
= adapter
->stats
.gorc
- adapter
->gorc_old
;
3633 adapter
->gorc_old
= adapter
->stats
.gorc
;
3634 adapter
->gotc
= adapter
->stats
.gotc
- adapter
->gotc_old
;
3635 adapter
->gotc_old
= adapter
->stats
.gotc
;
3637 e1000e_update_adaptive(&adapter
->hw
);
3639 if (!netif_carrier_ok(netdev
)) {
3640 tx_pending
= (e1000_desc_unused(tx_ring
) + 1 <
3644 * We've lost link, so the controller stops DMA,
3645 * but we've got queued Tx work that's never going
3646 * to get done, so reset controller to flush Tx.
3647 * (Do the reset outside of interrupt context).
3649 adapter
->tx_timeout_count
++;
3650 schedule_work(&adapter
->reset_task
);
3654 /* Cause software interrupt to ensure Rx ring is cleaned */
3655 if (adapter
->msix_entries
)
3656 ew32(ICS
, adapter
->rx_ring
->ims_val
);
3658 ew32(ICS
, E1000_ICS_RXDMT0
);
3660 /* Force detection of hung controller every watchdog period */
3661 adapter
->detect_tx_hung
= 1;
3664 * With 82571 controllers, LAA may be overwritten due to controller
3665 * reset from the other port. Set the appropriate LAA in RAR[0]
3667 if (e1000e_get_laa_state_82571(hw
))
3668 e1000e_rar_set(hw
, adapter
->hw
.mac
.addr
, 0);
3670 /* Reset the timer */
3671 if (!test_bit(__E1000_DOWN
, &adapter
->state
))
3672 mod_timer(&adapter
->watchdog_timer
,
3673 round_jiffies(jiffies
+ 2 * HZ
));
3676 #define E1000_TX_FLAGS_CSUM 0x00000001
3677 #define E1000_TX_FLAGS_VLAN 0x00000002
3678 #define E1000_TX_FLAGS_TSO 0x00000004
3679 #define E1000_TX_FLAGS_IPV4 0x00000008
3680 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
3681 #define E1000_TX_FLAGS_VLAN_SHIFT 16
3683 static int e1000_tso(struct e1000_adapter
*adapter
,
3684 struct sk_buff
*skb
)
3686 struct e1000_ring
*tx_ring
= adapter
->tx_ring
;
3687 struct e1000_context_desc
*context_desc
;
3688 struct e1000_buffer
*buffer_info
;
3691 u16 ipcse
= 0, tucse
, mss
;
3692 u8 ipcss
, ipcso
, tucss
, tucso
, hdr_len
;
3695 if (skb_is_gso(skb
)) {
3696 if (skb_header_cloned(skb
)) {
3697 err
= pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
);
3702 hdr_len
= skb_transport_offset(skb
) + tcp_hdrlen(skb
);
3703 mss
= skb_shinfo(skb
)->gso_size
;
3704 if (skb
->protocol
== htons(ETH_P_IP
)) {
3705 struct iphdr
*iph
= ip_hdr(skb
);
3708 tcp_hdr(skb
)->check
= ~csum_tcpudp_magic(iph
->saddr
,
3712 cmd_length
= E1000_TXD_CMD_IP
;
3713 ipcse
= skb_transport_offset(skb
) - 1;
3714 } else if (skb_shinfo(skb
)->gso_type
== SKB_GSO_TCPV6
) {
3715 ipv6_hdr(skb
)->payload_len
= 0;
3716 tcp_hdr(skb
)->check
=
3717 ~csum_ipv6_magic(&ipv6_hdr(skb
)->saddr
,
3718 &ipv6_hdr(skb
)->daddr
,
3722 ipcss
= skb_network_offset(skb
);
3723 ipcso
= (void *)&(ip_hdr(skb
)->check
) - (void *)skb
->data
;
3724 tucss
= skb_transport_offset(skb
);
3725 tucso
= (void *)&(tcp_hdr(skb
)->check
) - (void *)skb
->data
;
3728 cmd_length
|= (E1000_TXD_CMD_DEXT
| E1000_TXD_CMD_TSE
|
3729 E1000_TXD_CMD_TCP
| (skb
->len
- (hdr_len
)));
3731 i
= tx_ring
->next_to_use
;
3732 context_desc
= E1000_CONTEXT_DESC(*tx_ring
, i
);
3733 buffer_info
= &tx_ring
->buffer_info
[i
];
3735 context_desc
->lower_setup
.ip_fields
.ipcss
= ipcss
;
3736 context_desc
->lower_setup
.ip_fields
.ipcso
= ipcso
;
3737 context_desc
->lower_setup
.ip_fields
.ipcse
= cpu_to_le16(ipcse
);
3738 context_desc
->upper_setup
.tcp_fields
.tucss
= tucss
;
3739 context_desc
->upper_setup
.tcp_fields
.tucso
= tucso
;
3740 context_desc
->upper_setup
.tcp_fields
.tucse
= cpu_to_le16(tucse
);
3741 context_desc
->tcp_seg_setup
.fields
.mss
= cpu_to_le16(mss
);
3742 context_desc
->tcp_seg_setup
.fields
.hdr_len
= hdr_len
;
3743 context_desc
->cmd_and_length
= cpu_to_le32(cmd_length
);
3745 buffer_info
->time_stamp
= jiffies
;
3746 buffer_info
->next_to_watch
= i
;
3749 if (i
== tx_ring
->count
)
3751 tx_ring
->next_to_use
= i
;
3759 static bool e1000_tx_csum(struct e1000_adapter
*adapter
, struct sk_buff
*skb
)
3761 struct e1000_ring
*tx_ring
= adapter
->tx_ring
;
3762 struct e1000_context_desc
*context_desc
;
3763 struct e1000_buffer
*buffer_info
;
3766 u32 cmd_len
= E1000_TXD_CMD_DEXT
;
3768 if (skb
->ip_summed
!= CHECKSUM_PARTIAL
)
3771 switch (skb
->protocol
) {
3772 case cpu_to_be16(ETH_P_IP
):
3773 if (ip_hdr(skb
)->protocol
== IPPROTO_TCP
)
3774 cmd_len
|= E1000_TXD_CMD_TCP
;
3776 case cpu_to_be16(ETH_P_IPV6
):
3777 /* XXX not handling all IPV6 headers */
3778 if (ipv6_hdr(skb
)->nexthdr
== IPPROTO_TCP
)
3779 cmd_len
|= E1000_TXD_CMD_TCP
;
3782 if (unlikely(net_ratelimit()))
3783 e_warn("checksum_partial proto=%x!\n", skb
->protocol
);
3787 css
= skb_transport_offset(skb
);
3789 i
= tx_ring
->next_to_use
;
3790 buffer_info
= &tx_ring
->buffer_info
[i
];
3791 context_desc
= E1000_CONTEXT_DESC(*tx_ring
, i
);
3793 context_desc
->lower_setup
.ip_config
= 0;
3794 context_desc
->upper_setup
.tcp_fields
.tucss
= css
;
3795 context_desc
->upper_setup
.tcp_fields
.tucso
=
3796 css
+ skb
->csum_offset
;
3797 context_desc
->upper_setup
.tcp_fields
.tucse
= 0;
3798 context_desc
->tcp_seg_setup
.data
= 0;
3799 context_desc
->cmd_and_length
= cpu_to_le32(cmd_len
);
3801 buffer_info
->time_stamp
= jiffies
;
3802 buffer_info
->next_to_watch
= i
;
3805 if (i
== tx_ring
->count
)
3807 tx_ring
->next_to_use
= i
;
3812 #define E1000_MAX_PER_TXD 8192
3813 #define E1000_MAX_TXD_PWR 12
3815 static int e1000_tx_map(struct e1000_adapter
*adapter
,
3816 struct sk_buff
*skb
, unsigned int first
,
3817 unsigned int max_per_txd
, unsigned int nr_frags
,
3820 struct e1000_ring
*tx_ring
= adapter
->tx_ring
;
3821 struct e1000_buffer
*buffer_info
;
3822 unsigned int len
= skb_headlen(skb
);
3823 unsigned int offset
, size
, count
= 0, i
;
3827 i
= tx_ring
->next_to_use
;
3829 if (skb_dma_map(&adapter
->pdev
->dev
, skb
, DMA_TO_DEVICE
)) {
3830 dev_err(&adapter
->pdev
->dev
, "TX DMA map failed\n");
3831 adapter
->tx_dma_failed
++;
3835 map
= skb_shinfo(skb
)->dma_maps
;
3839 buffer_info
= &tx_ring
->buffer_info
[i
];
3840 size
= min(len
, max_per_txd
);
3842 buffer_info
->length
= size
;
3843 buffer_info
->time_stamp
= jiffies
;
3844 buffer_info
->next_to_watch
= i
;
3845 buffer_info
->dma
= map
[0] + offset
;
3853 if (i
== tx_ring
->count
)
3858 for (f
= 0; f
< nr_frags
; f
++) {
3859 struct skb_frag_struct
*frag
;
3861 frag
= &skb_shinfo(skb
)->frags
[f
];
3867 if (i
== tx_ring
->count
)
3870 buffer_info
= &tx_ring
->buffer_info
[i
];
3871 size
= min(len
, max_per_txd
);
3873 buffer_info
->length
= size
;
3874 buffer_info
->time_stamp
= jiffies
;
3875 buffer_info
->next_to_watch
= i
;
3876 buffer_info
->dma
= map
[f
+ 1] + offset
;
3884 tx_ring
->buffer_info
[i
].skb
= skb
;
3885 tx_ring
->buffer_info
[first
].next_to_watch
= i
;
3890 static void e1000_tx_queue(struct e1000_adapter
*adapter
,
3891 int tx_flags
, int count
)
3893 struct e1000_ring
*tx_ring
= adapter
->tx_ring
;
3894 struct e1000_tx_desc
*tx_desc
= NULL
;
3895 struct e1000_buffer
*buffer_info
;
3896 u32 txd_upper
= 0, txd_lower
= E1000_TXD_CMD_IFCS
;
3899 if (tx_flags
& E1000_TX_FLAGS_TSO
) {
3900 txd_lower
|= E1000_TXD_CMD_DEXT
| E1000_TXD_DTYP_D
|
3902 txd_upper
|= E1000_TXD_POPTS_TXSM
<< 8;
3904 if (tx_flags
& E1000_TX_FLAGS_IPV4
)
3905 txd_upper
|= E1000_TXD_POPTS_IXSM
<< 8;
3908 if (tx_flags
& E1000_TX_FLAGS_CSUM
) {
3909 txd_lower
|= E1000_TXD_CMD_DEXT
| E1000_TXD_DTYP_D
;
3910 txd_upper
|= E1000_TXD_POPTS_TXSM
<< 8;
3913 if (tx_flags
& E1000_TX_FLAGS_VLAN
) {
3914 txd_lower
|= E1000_TXD_CMD_VLE
;
3915 txd_upper
|= (tx_flags
& E1000_TX_FLAGS_VLAN_MASK
);
3918 i
= tx_ring
->next_to_use
;
3921 buffer_info
= &tx_ring
->buffer_info
[i
];
3922 tx_desc
= E1000_TX_DESC(*tx_ring
, i
);
3923 tx_desc
->buffer_addr
= cpu_to_le64(buffer_info
->dma
);
3924 tx_desc
->lower
.data
=
3925 cpu_to_le32(txd_lower
| buffer_info
->length
);
3926 tx_desc
->upper
.data
= cpu_to_le32(txd_upper
);
3929 if (i
== tx_ring
->count
)
3933 tx_desc
->lower
.data
|= cpu_to_le32(adapter
->txd_cmd
);
3936 * Force memory writes to complete before letting h/w
3937 * know there are new descriptors to fetch. (Only
3938 * applicable for weak-ordered memory model archs,
3943 tx_ring
->next_to_use
= i
;
3944 writel(i
, adapter
->hw
.hw_addr
+ tx_ring
->tail
);
3946 * we need this if more than one processor can write to our tail
3947 * at a time, it synchronizes IO on IA64/Altix systems
3952 #define MINIMUM_DHCP_PACKET_SIZE 282
3953 static int e1000_transfer_dhcp_info(struct e1000_adapter
*adapter
,
3954 struct sk_buff
*skb
)
3956 struct e1000_hw
*hw
= &adapter
->hw
;
3959 if (vlan_tx_tag_present(skb
)) {
3960 if (!((vlan_tx_tag_get(skb
) == adapter
->hw
.mng_cookie
.vlan_id
)
3961 && (adapter
->hw
.mng_cookie
.status
&
3962 E1000_MNG_DHCP_COOKIE_STATUS_VLAN
)))
3966 if (skb
->len
<= MINIMUM_DHCP_PACKET_SIZE
)
3969 if (((struct ethhdr
*) skb
->data
)->h_proto
!= htons(ETH_P_IP
))
3973 const struct iphdr
*ip
= (struct iphdr
*)((u8
*)skb
->data
+14);
3976 if (ip
->protocol
!= IPPROTO_UDP
)
3979 udp
= (struct udphdr
*)((u8
*)ip
+ (ip
->ihl
<< 2));
3980 if (ntohs(udp
->dest
) != 67)
3983 offset
= (u8
*)udp
+ 8 - skb
->data
;
3984 length
= skb
->len
- offset
;
3985 return e1000e_mng_write_dhcp_info(hw
, (u8
*)udp
+ 8, length
);
3991 static int __e1000_maybe_stop_tx(struct net_device
*netdev
, int size
)
3993 struct e1000_adapter
*adapter
= netdev_priv(netdev
);
3995 netif_stop_queue(netdev
);
3997 * Herbert's original patch had:
3998 * smp_mb__after_netif_stop_queue();
3999 * but since that doesn't exist yet, just open code it.
4004 * We need to check again in a case another CPU has just
4005 * made room available.
4007 if (e1000_desc_unused(adapter
->tx_ring
) < size
)
4011 netif_start_queue(netdev
);
4012 ++adapter
->restart_queue
;
4016 static int e1000_maybe_stop_tx(struct net_device
*netdev
, int size
)
4018 struct e1000_adapter
*adapter
= netdev_priv(netdev
);
4020 if (e1000_desc_unused(adapter
->tx_ring
) >= size
)
4022 return __e1000_maybe_stop_tx(netdev
, size
);
4025 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
4026 static int e1000_xmit_frame(struct sk_buff
*skb
, struct net_device
*netdev
)
4028 struct e1000_adapter
*adapter
= netdev_priv(netdev
);
4029 struct e1000_ring
*tx_ring
= adapter
->tx_ring
;
4031 unsigned int max_per_txd
= E1000_MAX_PER_TXD
;
4032 unsigned int max_txd_pwr
= E1000_MAX_TXD_PWR
;
4033 unsigned int tx_flags
= 0;
4034 unsigned int len
= skb
->len
- skb
->data_len
;
4035 unsigned int nr_frags
;
4041 if (test_bit(__E1000_DOWN
, &adapter
->state
)) {
4042 dev_kfree_skb_any(skb
);
4043 return NETDEV_TX_OK
;
4046 if (skb
->len
<= 0) {
4047 dev_kfree_skb_any(skb
);
4048 return NETDEV_TX_OK
;
4051 mss
= skb_shinfo(skb
)->gso_size
;
4053 * The controller does a simple calculation to
4054 * make sure there is enough room in the FIFO before
4055 * initiating the DMA for each buffer. The calc is:
4056 * 4 = ceil(buffer len/mss). To make sure we don't
4057 * overrun the FIFO, adjust the max buffer len if mss
4062 max_per_txd
= min(mss
<< 2, max_per_txd
);
4063 max_txd_pwr
= fls(max_per_txd
) - 1;
4066 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
4067 * points to just header, pull a few bytes of payload from
4068 * frags into skb->data
4070 hdr_len
= skb_transport_offset(skb
) + tcp_hdrlen(skb
);
4072 * we do this workaround for ES2LAN, but it is un-necessary,
4073 * avoiding it could save a lot of cycles
4075 if (skb
->data_len
&& (hdr_len
== len
)) {
4076 unsigned int pull_size
;
4078 pull_size
= min((unsigned int)4, skb
->data_len
);
4079 if (!__pskb_pull_tail(skb
, pull_size
)) {
4080 e_err("__pskb_pull_tail failed.\n");
4081 dev_kfree_skb_any(skb
);
4082 return NETDEV_TX_OK
;
4084 len
= skb
->len
- skb
->data_len
;
4088 /* reserve a descriptor for the offload context */
4089 if ((mss
) || (skb
->ip_summed
== CHECKSUM_PARTIAL
))
4093 count
+= TXD_USE_COUNT(len
, max_txd_pwr
);
4095 nr_frags
= skb_shinfo(skb
)->nr_frags
;
4096 for (f
= 0; f
< nr_frags
; f
++)
4097 count
+= TXD_USE_COUNT(skb_shinfo(skb
)->frags
[f
].size
,
4100 if (adapter
->hw
.mac
.tx_pkt_filtering
)
4101 e1000_transfer_dhcp_info(adapter
, skb
);
4104 * need: count + 2 desc gap to keep tail from touching
4105 * head, otherwise try next time
4107 if (e1000_maybe_stop_tx(netdev
, count
+ 2))
4108 return NETDEV_TX_BUSY
;
4110 if (adapter
->vlgrp
&& vlan_tx_tag_present(skb
)) {
4111 tx_flags
|= E1000_TX_FLAGS_VLAN
;
4112 tx_flags
|= (vlan_tx_tag_get(skb
) << E1000_TX_FLAGS_VLAN_SHIFT
);
4115 first
= tx_ring
->next_to_use
;
4117 tso
= e1000_tso(adapter
, skb
);
4119 dev_kfree_skb_any(skb
);
4120 return NETDEV_TX_OK
;
4124 tx_flags
|= E1000_TX_FLAGS_TSO
;
4125 else if (e1000_tx_csum(adapter
, skb
))
4126 tx_flags
|= E1000_TX_FLAGS_CSUM
;
4129 * Old method was to assume IPv4 packet by default if TSO was enabled.
4130 * 82571 hardware supports TSO capabilities for IPv6 as well...
4131 * no longer assume, we must.
4133 if (skb
->protocol
== htons(ETH_P_IP
))
4134 tx_flags
|= E1000_TX_FLAGS_IPV4
;
4136 /* if count is 0 then mapping error has occured */
4137 count
= e1000_tx_map(adapter
, skb
, first
, max_per_txd
, nr_frags
, mss
);
4139 e1000_tx_queue(adapter
, tx_flags
, count
);
4140 netdev
->trans_start
= jiffies
;
4141 /* Make sure there is space in the ring for the next send. */
4142 e1000_maybe_stop_tx(netdev
, MAX_SKB_FRAGS
+ 2);
4145 dev_kfree_skb_any(skb
);
4146 tx_ring
->buffer_info
[first
].time_stamp
= 0;
4147 tx_ring
->next_to_use
= first
;
4150 return NETDEV_TX_OK
;
4154 * e1000_tx_timeout - Respond to a Tx Hang
4155 * @netdev: network interface device structure
4157 static void e1000_tx_timeout(struct net_device
*netdev
)
4159 struct e1000_adapter
*adapter
= netdev_priv(netdev
);
4161 /* Do the reset outside of interrupt context */
4162 adapter
->tx_timeout_count
++;
4163 schedule_work(&adapter
->reset_task
);
4166 static void e1000_reset_task(struct work_struct
*work
)
4168 struct e1000_adapter
*adapter
;
4169 adapter
= container_of(work
, struct e1000_adapter
, reset_task
);
4171 e1000e_reinit_locked(adapter
);
4175 * e1000_get_stats - Get System Network Statistics
4176 * @netdev: network interface device structure
4178 * Returns the address of the device statistics structure.
4179 * The statistics are actually updated from the timer callback.
4181 static struct net_device_stats
*e1000_get_stats(struct net_device
*netdev
)
4183 struct e1000_adapter
*adapter
= netdev_priv(netdev
);
4185 /* only return the current stats */
4186 return &adapter
->net_stats
;
4190 * e1000_change_mtu - Change the Maximum Transfer Unit
4191 * @netdev: network interface device structure
4192 * @new_mtu: new value for maximum frame size
4194 * Returns 0 on success, negative on failure
4196 static int e1000_change_mtu(struct net_device
*netdev
, int new_mtu
)
4198 struct e1000_adapter
*adapter
= netdev_priv(netdev
);
4199 int max_frame
= new_mtu
+ ETH_HLEN
+ ETH_FCS_LEN
;
4201 if ((new_mtu
< ETH_ZLEN
+ ETH_FCS_LEN
+ VLAN_HLEN
) ||
4202 (max_frame
> MAX_JUMBO_FRAME_SIZE
)) {
4203 e_err("Invalid MTU setting\n");
4207 /* Jumbo frame size limits */
4208 if (max_frame
> ETH_FRAME_LEN
+ ETH_FCS_LEN
) {
4209 if (!(adapter
->flags
& FLAG_HAS_JUMBO_FRAMES
)) {
4210 e_err("Jumbo Frames not supported.\n");
4213 if (adapter
->hw
.phy
.type
== e1000_phy_ife
) {
4214 e_err("Jumbo Frames not supported.\n");
4219 #define MAX_STD_JUMBO_FRAME_SIZE 9234
4220 if (max_frame
> MAX_STD_JUMBO_FRAME_SIZE
) {
4221 e_err("MTU > 9216 not supported.\n");
4225 while (test_and_set_bit(__E1000_RESETTING
, &adapter
->state
))
4227 /* e1000e_down has a dependency on max_frame_size */
4228 adapter
->max_frame_size
= max_frame
;
4229 if (netif_running(netdev
))
4230 e1000e_down(adapter
);
4233 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
4234 * means we reserve 2 more, this pushes us to allocate from the next
4236 * i.e. RXBUFFER_2048 --> size-4096 slab
4237 * However with the new *_jumbo_rx* routines, jumbo receives will use
4241 if (max_frame
<= 256)
4242 adapter
->rx_buffer_len
= 256;
4243 else if (max_frame
<= 512)
4244 adapter
->rx_buffer_len
= 512;
4245 else if (max_frame
<= 1024)
4246 adapter
->rx_buffer_len
= 1024;
4247 else if (max_frame
<= 2048)
4248 adapter
->rx_buffer_len
= 2048;
4250 adapter
->rx_buffer_len
= 4096;
4252 /* adjust allocation if LPE protects us, and we aren't using SBP */
4253 if ((max_frame
== ETH_FRAME_LEN
+ ETH_FCS_LEN
) ||
4254 (max_frame
== ETH_FRAME_LEN
+ VLAN_HLEN
+ ETH_FCS_LEN
))
4255 adapter
->rx_buffer_len
= ETH_FRAME_LEN
+ VLAN_HLEN
4258 e_info("changing MTU from %d to %d\n", netdev
->mtu
, new_mtu
);
4259 netdev
->mtu
= new_mtu
;
4261 if (netif_running(netdev
))
4264 e1000e_reset(adapter
);
4266 clear_bit(__E1000_RESETTING
, &adapter
->state
);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		switch (data->reg_num & 0x1F) {
		case MII_BMCR:
			data->val_out = adapter->phy_regs.bmcr;
			break;
		case MII_BMSR:
			data->val_out = adapter->phy_regs.bmsr;
			break;
		case MII_PHYSID1:
			data->val_out = (adapter->hw.phy.id >> 16);
			break;
		case MII_PHYSID2:
			data->val_out = (adapter->hw.phy.id & 0xFFFF);
			break;
		case MII_ADVERTISE:
			data->val_out = adapter->phy_regs.advertise;
			break;
		case MII_LPA:
			data->val_out = adapter->phy_regs.lpa;
			break;
		case MII_EXPANSION:
			data->val_out = adapter->phy_regs.expansion;
			break;
		case MII_CTRL1000:
			data->val_out = adapter->phy_regs.ctrl1000;
			break;
		case MII_STAT1000:
			data->val_out = adapter->phy_regs.stat1000;
			break;
		case MII_ESTATUS:
			data->val_out = adapter->phy_regs.estatus;
			break;
		default:
			return -EIO;
		}
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
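/*
 * Note: SIOCGMIIREG above is answered from the phy_regs snapshot the driver
 * keeps, rather than by issuing MDIO reads on demand, so userspace tools
 * polling these registers do not disturb the PHY.
 */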
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return e1000_mii_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status;
	u32 wufc = adapter->wol;
	int retval = 0;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
		e1000e_down(adapter);
		e1000_free_irq(adapter);
	}
	e1000e_reset_interrupt_capability(adapter);

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	status = er32(STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000_set_multi(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = er32(RCTL);
			rctl |= E1000_RCTL_MPE;
			ew32(RCTL, rctl);
		}

		ctrl = er32(CTRL);
		/* advertise wake from D3Cold */
#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC |
			E1000_CTRL_EN_PHY_PWR_MGMT;
		ew32(CTRL, ctrl);

		if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
		    adapter->hw.phy.media_type ==
		    e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		if (adapter->flags & FLAG_IS_ICH)
			e1000e_disable_gig_wol_ich8lan(&adapter->hw);

		/* Allow time for pending master requests to run */
		e1000e_disable_pcie_master(&adapter->hw);

		ew32(WUC, E1000_WUC_PME_EN);
		ew32(WUFC, wufc);
		pci_enable_wake(pdev, PCI_D3hot, 1);
		pci_enable_wake(pdev, PCI_D3cold, 1);
	} else {
		ew32(WUC, 0);
		ew32(WUFC, 0);
		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);
	}

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->flags & FLAG_MNG_PT_ENABLED) {
		pci_enable_wake(pdev, PCI_D3hot, 1);
		pci_enable_wake(pdev, PCI_D3cold, 1);
	}

	if (adapter->hw.phy.type == e1000_phy_igp_3)
		e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);

	/*
	 * Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	e1000_release_hw_control(adapter);

	pci_disable_device(pdev);

	/*
	 * The pci-e switch on some quad port adapters will report a
	 * correctable error when the MAC transitions from D0 to D3. To
	 * prevent this we need to mask off the correctable errors on the
	 * downstream port of the pci-e switch.
	 */
	if (adapter->flags & FLAG_IS_QUAD_PORT) {
		struct pci_dev *us_dev = pdev->bus->self;
		int pos = pci_find_capability(us_dev, PCI_CAP_ID_EXP);
		u16 devctl;

		pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl);
		pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL,
				      (devctl & ~PCI_EXP_DEVCTL_CERE));

		pci_set_power_state(pdev, pci_choose_state(pdev, state));

		pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl);
	} else {
		pci_set_power_state(pdev, pci_choose_state(pdev, state));
	}

	return 0;
}
static void e1000e_disable_l1aspm(struct pci_dev *pdev)
{
	int pos;
	u16 val;

	/*
	 * 82573 workaround - disable L1 ASPM on mobile chipsets
	 *
	 * L1 ASPM on various mobile (ich7) chipsets does not behave properly,
	 * resulting in lost data or garbage information on the pci-e link
	 * level. This could result in (false) bad EEPROM checksum errors,
	 * long ping times (up to 2s) or even a system freeze/hang.
	 *
	 * Unfortunately this feature saves about 1W power consumption when
	 * active.
	 */
	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &val);
	if (val & 0x2) {
		dev_warn(&pdev->dev, "Disabling L1 ASPM\n");
		val &= ~0x2;
		pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, val);
	}
}
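/*
 * Assumption based on the PCIe spec: bit 1 of the Link Control register is
 * the ASPM L1 entry enable, so clearing 0x2 above disables only L1 and
 * leaves L0s (bit 0) untouched.
 */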
static int e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	e1000e_disable_l1aspm(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"Cannot enable PCI device from suspend\n");
		return err;
	}

	/* AER (Advanced Error Reporting) hooks */
	err = pci_enable_pcie_error_reporting(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
			"0x%x\n", err);
		/* non-fatal, continue */
	}

	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	e1000e_set_interrupt_capability(adapter);
	if (netif_running(netdev)) {
		err = e1000_request_irq(adapter);
		if (err)
			return err;
	}

	e1000e_power_up_phy(adapter);
	e1000e_reset(adapter);
	ew32(WUS, ~0);

	e1000_init_manageability(adapter);

	if (netif_running(netdev))
		e1000e_up(adapter);

	netif_device_attach(netdev);

	/*
	 * If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up. For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000_get_hw_control(adapter);

	return 0;
}
static void e1000_shutdown(struct pci_dev *pdev)
{
	e1000_suspend(pdev, PMSG_SUSPEND);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	e1000_intr(adapter->pdev->irq, netdev);

	enable_irq(adapter->pdev->irq);
}
#endif
/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev))
		e1000e_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the e1000_resume routine.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;
	pci_ers_result_t result;

	e1000e_disable_l1aspm(pdev);
	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		e1000e_reset(adapter);
		ew32(WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	pci_cleanup_aer_uncorrect_error_status(pdev);

	return result;
}
/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the e1000_resume routine.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability(adapter);

	if (netif_running(netdev)) {
		if (e1000e_up(adapter)) {
			dev_err(&pdev->dev,
				"can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/*
	 * If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up. For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000_get_hw_control(adapter);
}
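/*
 * The three AER callbacks above are invoked in order by the PCI error
 * recovery core: error_detected() quiesces the device, slot_reset() runs
 * after the slot/link has been reset, and resume() restarts traffic once
 * recovery is declared successful.
 */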
static void e1000_print_device_info(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 pba_num;

	/* print bus type/speed/width info */
	e_info("(PCI Express:2.5GB/s:%s) %pM\n",
	       /* bus width */
	       ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
		"Width x1"),
	       /* MAC address */
	       netdev->dev_addr);
	e_info("Intel(R) PRO/%s Network Connection\n",
	       (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
	e1000e_read_pba_num(hw, &pba_num);
	e_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
	       hw->mac.type, hw->phy.type, (pba_num >> 8), (pba_num & 0xff));
}
static void e1000_eeprom_checks(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int ret_val;
	u16 buf = 0;

	if (hw->mac.type != e1000_82573)
		return;

	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
	if (!ret_val && (!(le16_to_cpu(buf) & (1 << 0)))) {
		/* Deep Smart Power Down (DSPD) */
		dev_warn(&adapter->pdev->dev,
			 "Warning: detected DSPD enabled in EEPROM\n");
	}

	ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf);
	if (!ret_val && (le16_to_cpu(buf) & (3 << 2))) {
		/* ASPM enable */
		dev_warn(&adapter->pdev->dev,
			 "Warning: detected ASPM enabled in EEPROM\n");
	}
}
static const struct net_device_ops e1000e_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_get_stats		= e1000_get_stats,
	.ndo_set_multicast_list	= e1000_set_multi,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_do_ioctl		= e1000_ioctl,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,

	.ndo_vlan_rx_register	= e1000_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
};
/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit e1000_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;
	const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
	resource_size_t mmio_start, mmio_len;
	resource_size_t flash_start, flash_len;

	static int cards_found;
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
	e1000e_disable_l1aspm(pdev);

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (!err)
			pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_32BIT_MASK);
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_selected_regions_exclusive(pdev,
					  pci_select_bars(pdev, IORESOURCE_MEM),
					  e1000e_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);
	/* PCI config space info */
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	hw = &adapter->hw;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->pba = ei->pba;
	adapter->flags = ei->flags;
	adapter->flags2 = ei->flags2;
	adapter->hw.adapter = adapter;
	adapter->hw.mac.type = ei->mac;
	adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if (!adapter->hw.hw_addr)
		goto err_ioremap;

	if ((adapter->flags & FLAG_HAS_FLASH) &&
	    (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		flash_start = pci_resource_start(pdev, 1);
		flash_len = pci_resource_len(pdev, 1);
		adapter->hw.flash_address = ioremap(flash_start, flash_len);
		if (!adapter->hw.flash_address)
			goto err_flashmap;
	}

	/* construct the net_device struct */
	netdev->netdev_ops = &e1000e_netdev_ops;
	e1000e_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	adapter->bd_number = cards_found++;

	e1000e_check_options(adapter);

	/* setup adapter struct */
	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;
	err = -EIO;

	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));

	err = ei->get_variants(adapter);
	if (err)
		goto err_hw_init;

	if ((adapter->flags & FLAG_IS_ICH) &&
	    (adapter->flags & FLAG_READ_ONLY_NVM))
		e1000e_write_protect_nvm_ich8lan(&adapter->hw);

	hw->mac.ops.get_bus_info(&adapter->hw);

	adapter->hw.phy.autoneg_wait_to_complete = 0;

	/* Copper options */
	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
		adapter->hw.phy.mdix = AUTO_ALL_MODES;
		adapter->hw.phy.disable_polarity_correction = 0;
		adapter->hw.phy.ms_type = e1000_ms_hw_default;
	}

	if (e1000_check_reset_block(&adapter->hw))
		e_info("PHY reset is blocked due to SOL/IDER session.\n");

	netdev->features = NETIF_F_SG |
			   NETIF_F_HW_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX;

	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
		netdev->features |= NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_HW_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	if (e1000e_enable_mng_pass_thru(&adapter->hw))
		adapter->flags |= FLAG_MNG_PT_ENABLED;

	/*
	 * before reading the NVM, reset the controller to
	 * put the device in a known good starting state
	 */
	adapter->hw.mac.ops.reset_hw(&adapter->hw);

	/*
	 * systems with ASPM and others may see the checksum fail on the first
	 * attempt. Let's give it a few tries
	 */
	for (i = 0;; i++) {
		if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
			break;
		if (i == 2) {
			e_err("The NVM Checksum Is Not Valid\n");
			err = -EIO;
			goto err_eeprom;
		}
	}
	e1000_eeprom_checks(adapter);

	/* copy the MAC address out of the NVM */
	if (e1000e_read_mac_addr(&adapter->hw))
		e_err("NVM Read Error while reading MAC address\n");

	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		e_err("Invalid MAC Address: %pM\n", netdev->perm_addr);
		err = -EIO;
		goto err_eeprom;
	}

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &e1000_watchdog;
	adapter->watchdog_timer.data = (unsigned long) adapter;

	init_timer(&adapter->phy_info_timer);
	adapter->phy_info_timer.function = &e1000_update_phy_info;
	adapter->phy_info_timer.data = (unsigned long) adapter;

	INIT_WORK(&adapter->reset_task, e1000_reset_task);
	INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
	INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
	INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);

	/* Initialize link parameters. User can change them with ethtool */
	adapter->hw.mac.autoneg = 1;
	adapter->fc_autoneg = 1;
	adapter->hw.fc.requested_mode = e1000_fc_default;
	adapter->hw.fc.current_mode = e1000_fc_default;
	adapter->hw.phy.autoneg_advertised = 0x2f;

	/* ring size defaults */
	adapter->rx_ring->count = 256;
	adapter->tx_ring->count = 256;
	/*
	 * Initial Wake on LAN setting - If APM wake is enabled in
	 * the EEPROM, enable the ACPI Magic Packet filter
	 */
	if (adapter->flags & FLAG_APME_IN_WUC) {
		/* APME bit in EEPROM is mapped to WUC.APME */
		eeprom_data = er32(WUC);
		eeprom_apme_mask = E1000_WUC_APME;
	} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
		if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
		    (adapter->hw.bus.func == 1))
			e1000_read_nvm(&adapter->hw,
				       NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
		else
			e1000_read_nvm(&adapter->hw,
				       NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
	}

	/* fetch WoL from EEPROM */
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/*
	 * now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port
	 */
	if (!(adapter->flags & FLAG_HAS_WOL))
		adapter->eeprom_wol = 0;

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* save off EEPROM version number */
	e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);

	/* reset the hardware with the new settings */
	e1000e_reset(adapter);

	/*
	 * If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up. For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000_get_hw_control(adapter);

	/* tell the stack to leave us alone until e1000_open() is called */
	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	e1000_print_device_info(adapter);

	return 0;
err_register:
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000_release_hw_control(adapter);
err_eeprom:
	if (!e1000_check_reset_block(&adapter->hw))
		e1000_phy_hw_reset(&adapter->hw);
err_hw_init:
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_sw_init:
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
	e1000e_reset_interrupt_capability(adapter);
err_flashmap:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
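/*
 * The error labels above unwind the probe steps in reverse order of
 * acquisition, so a failure at any stage releases only the resources that
 * had already been set up.
 */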
/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	int err;

	/*
	 * flush_scheduled_work() may reschedule our watchdog task, so
	 * explicitly disable watchdog tasks from being rescheduled
	 */
	set_bit(__E1000_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	flush_scheduled_work();

	/*
	 * Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	e1000_release_hw_control(adapter);

	unregister_netdev(netdev);

	if (!e1000_check_reset_block(&adapter->hw))
		e1000_phy_hw_reset(&adapter->hw);

	e1000e_reset_interrupt_capability(adapter);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(adapter->hw.hw_addr);
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	free_netdev(netdev);

	err = pci_disable_pcie_error_reporting(pdev);
	if (err)
		dev_err(&pdev->dev,
			"pci_disable_pcie_error_reporting failed 0x%x\n", err);

	pci_disable_device(pdev);
}
/* PCI Error Recovery (ERS) */
static struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};
static struct pci_device_id e1000_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
	  board_80003es2lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },

	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
/* PCI Device API Driver */
static struct pci_driver e1000_driver = {
	.name     = e1000e_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = __devexit_p(e1000_remove),
	/* Power Management Hooks */
	.suspend  = e1000_suspend,
	.resume   = e1000_resume,
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};
/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init e1000_init_module(void)
{
	int ret;

	printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n",
	       e1000e_driver_name, e1000e_driver_version);
	printk(KERN_INFO "%s: Copyright (c) 1999-2008 Intel Corporation.\n",
	       e1000e_driver_name);
	ret = pci_register_driver(&e1000_driver);
	pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name,
			       PM_QOS_DEFAULT_VALUE);

	return ret;
}
module_init(e1000_init_module);
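/*
 * Registering a PM_QOS_CPU_DMA_LATENCY requirement with PM_QOS_DEFAULT_VALUE
 * imposes no constraint by itself; it gives the driver a handle it can later
 * tighten to keep deep CPU sleep states from inflating DMA latency
 * (behaviour assumed from the pm_qos_params API of this kernel generation).
 */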
/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
	pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name);
}
module_exit(e1000_exit_module);
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);