/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2008 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos_params.h>

#include "e1000.h"
#define DRV_VERSION "0.3.3.3-k2"
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
static const struct e1000_info *e1000_info_tbl[] = {
	[board_82571]		= &e1000_82571_info,
	[board_82572]		= &e1000_82572_info,
	[board_82573]		= &e1000_82573_info,
	[board_80003es2lan]	= &e1000_es2_info,
	[board_ich8lan]		= &e1000_ich8_info,
	[board_ich9lan]		= &e1000_ich9_info,
};
/**
 * e1000e_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *e1000e_get_hw_dev_name(struct e1000_hw *hw)
{
	return hw->adapter->netdev->name;
}
/**
 * e1000_desc_unused - calculate if we have unused descriptors
 **/
static int e1000_desc_unused(struct e1000_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
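
/*
 * Editorial note (worked example, not in the original source): with a ring
 * of count = 256, next_to_clean = 10 and next_to_use = 250, the wrapped case
 * applies and e1000_desc_unused() returns 256 + 10 - 250 - 1 = 15.  One slot
 * is always kept unused so that next_to_use == next_to_clean unambiguously
 * means "ring empty" rather than "ring full".
 */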
/**
 * e1000_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to stack
 **/
static void e1000_receive_skb(struct e1000_adapter *adapter,
			      struct net_device *netdev,
			      struct sk_buff *skb,
			      u8 status, __le16 vlan)
{
	skb->protocol = eth_type_trans(skb, netdev);

	if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
		vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
					 le16_to_cpu(vlan));
	else
		netif_receive_skb(skb);

	netdev->last_rx = jiffies;
}
/**
 * e1000_rx_checksum - Receive Checksum Offload
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @csum: receive descriptor csum field
 * @sk_buff: socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      u32 csum, struct sk_buff *skb)
{
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);
	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set */
	if (status & E1000_RXD_STAT_IXSM)
		return;
	/* TCP/UDP checksum error bit is set */
	if (errors & E1000_RXD_ERR_TCPE) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* TCP/UDP Checksum has not been calculated */
	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	if (status & E1000_RXD_STAT_TCPCS) {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		/*
		 * IP fragment with UDP payload
		 * Hardware complements the payload checksum, so we undo it
		 * and then put the value in host order for further stack use.
		 */
		__sum16 sum = (__force __sum16)htons(csum);
		skb->csum = csum_unfold(~sum);
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
	adapter->hw_csum_good++;
}
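
/*
 * Editorial note (illustration, not in the original source): in the UDP
 * fragment path above, the hardware reports the one's complement of the
 * payload checksum in the descriptor csum field.  If the descriptor carried
 * csum = 0x1234, the code folds it back with
 * csum_unfold(~(__force __sum16)htons(0x1234)) and hands the stack a
 * CHECKSUM_COMPLETE value it can combine across fragments.
 */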
/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 **/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb(netdev, bufsz);
		if (!skb) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/*
		 * Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->skb = skb;
map_skb:
		buffer_info->dma = pci_map_single(pdev, skb->data,
						  adapter->rx_buffer_len,
						  PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
			dev_err(&pdev->dev, "RX DMA map failed\n");
			adapter->rx_dma_failed++;
			break;
		}

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i-- == 0)
			i = (rx_ring->count - 1);

		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
				      int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_packet_split *rx_desc;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (j >= adapter->rx_ps_pages) {
				/* all unused desc entries get hw null ptr */
				rx_desc->read.buffer_addr[j+1] =
				    ~cpu_to_le64(0);
				continue;
			}
			if (!ps_page->page) {
				ps_page->page = alloc_page(GFP_ATOMIC);
				if (!ps_page->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				ps_page->dma = pci_map_page(pdev,
							    ps_page->page,
							    0, PAGE_SIZE,
							    PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(pdev,
							  ps_page->dma)) {
					dev_err(&adapter->pdev->dev,
						"RX DMA page map failed\n");
					adapter->rx_dma_failed++;
					goto no_buffers;
				}
			}
			/*
			 * Refresh the desc even if buffer_addrs
			 * didn't change because each write-back
			 * erases this info.
			 */
			rx_desc->read.buffer_addr[j+1] =
			    cpu_to_le64(ps_page->dma);
		}

		skb = netdev_alloc_skb(netdev,
				       adapter->rx_ps_bsize0 + NET_IP_ALIGN);

		if (!skb) {
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/*
		 * Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->skb = skb;
		buffer_info->dma = pci_map_single(pdev, skb->data,
						  adapter->rx_ps_bsize0,
						  PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
			dev_err(&pdev->dev, "RX DMA map failed\n");
			adapter->rx_dma_failed++;
			/* cleanup skb */
			dev_kfree_skb_any(skb);
			buffer_info->skb = NULL;
			break;
		}

		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		if (!(i--))
			i = (rx_ring->count - 1);

		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		/*
		 * Hardware increments by 16 bytes, but packet split
		 * descriptors are 32 bytes...so we increment tail
		 * twice as much.
		 */
		writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
	}
}
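
/*
 * Editorial note (illustration, not in the original source): the tail
 * register counts in 16-byte descriptor units while a packet-split
 * descriptor occupies 32 bytes, so a ring index of, say, 100 must be
 * written as 100 << 1 = 200 for the hardware to land on the right
 * descriptor.
 */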
/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
 **/

static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = 256 -
			     16 /* for skb_reserve */ -
			     NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = netdev_alloc_skb(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->skb = skb;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(GFP_ATOMIC);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma)
			buffer_info->dma = pci_map_page(pdev,
							buffer_info->page, 0,
							PAGE_SIZE,
							PCI_DMA_FROMDEVICE);

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
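
/*
 * Editorial note (illustration, not in the original source): bufsz above is
 * 256 - 16 - NET_IP_ALIGN, i.e. only the small header skb; jumbo payload
 * lands in the page mapped in check_page.  Keeping the skb allocation under
 * 256 bytes (including the headroom netdev_alloc_skb itself reserves) lets
 * it come from a small kmalloc slab instead of a higher-order allocation.
 */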
/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = 1;
		cleaned_count++;
		pci_unmap_single(pdev,
				 buffer_info->dma,
				 adapter->rx_buffer_len,
				 PCI_DMA_FROMDEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/* !EOP means multiple descriptors were used to store a single
		 * packet, also make sure the frame isn't just CRC only */
		if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
			/* All receives must fit into a single buffer */
			e_dbg("%s: Receive packet consumed multiple buffers\n",
			      netdev->name);
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		total_rx_bytes += length;
		total_rx_packets++;

		/*
		 * code added for copybreak, this should improve
		 * performance for small packets with large amounts
		 * of reassembly being done in the stack
		 */
		if (length < copybreak) {
			struct sk_buff *new_skb =
			    netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
			if (new_skb) {
				skb_reserve(new_skb, NET_IP_ALIGN);
				skb_copy_to_linear_data_offset(new_skb,
							       -NET_IP_ALIGN,
							       (skb->data -
								NET_IP_ALIGN),
							       (length +
								NET_IP_ALIGN));
				/* save the skb in buffer_info as good */
				buffer_info->skb = skb;
				skb = new_skb;
			}
			/* else just continue with the old one */
		}
		/* end copybreak code */
		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		e1000_receive_skb(adapter, netdev, skb, status,
				  rx_desc->special);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;
	return cleaned;
}
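
/*
 * Editorial note (not in the original source): copybreak used above is a
 * knob defined elsewhere in this file (a module parameter in the stock
 * driver).  Packets shorter than it are copied into a fresh, right-sized
 * skb so the full-size receive buffer can be recycled immediately; e.g.
 * with a threshold of 256, a 128-byte packet is memcpy'd and the original
 * buffer goes straight back onto the ring, avoiding an allocation in the
 * hot path.
 */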
static void e1000_put_txbuf(struct e1000_adapter *adapter,
			    struct e1000_buffer *buffer_info)
{
	if (buffer_info->dma) {
		pci_unmap_page(adapter->pdev, buffer_info->dma,
			       buffer_info->length, PCI_DMA_TODEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
}
static void e1000_print_tx_hang(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int i = tx_ring->next_to_clean;
	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);

	/* detected Tx unit hang */
	e_err("Detected Tx Unit Hang:\n"
	      "  TDH                  <%x>\n"
	      "  TDT                  <%x>\n"
	      "  next_to_use          <%x>\n"
	      "  next_to_clean        <%x>\n"
	      "buffer_info[next_to_clean]:\n"
	      "  time_stamp           <%lx>\n"
	      "  next_to_watch        <%x>\n"
	      "  jiffies              <%lx>\n"
	      "  next_to_watch.status <%x>\n",
	      readl(adapter->hw.hw_addr + tx_ring->head),
	      readl(adapter->hw.hw_addr + tx_ring->tail),
	      tx_ring->next_to_use,
	      tx_ring->next_to_clean,
	      tx_ring->buffer_info[eop].time_stamp,
	      eop,
	      jiffies,
	      eop_desc->upper.fields.status);
}
/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	bool cleaned = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
		for (cleaned = 0; !cleaned; ) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				struct sk_buff *skb = buffer_info->skb;
				unsigned int segs, bytecount;
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_tx_packets += segs;
				total_tx_bytes += bytecount;
			}

			e1000_put_txbuf(adapter, buffer_info);
			tx_desc->upper.data = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
#define E1000_TX_WEIGHT 64
		/* weight of a sort for tx, to avoid endless transmit cleanup */
		if (count++ == E1000_TX_WEIGHT)
			break;
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD 32
	if (cleaned && netif_carrier_ok(netdev) &&
	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/*
		 * Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		adapter->detect_tx_hung = 0;
		if (tx_ring->buffer_info[eop].dma &&
		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp
			       + (adapter->tx_timeout_factor * HZ))
		    && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
			e1000_print_tx_hang(adapter);
			netif_stop_queue(netdev);
		}
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	adapter->net_stats.tx_bytes += total_tx_bytes;
	adapter->net_stats.tx_packets += total_tx_packets;
	return cleaned;
}
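
/*
 * Editorial note (worked example, not in the original source): for a TSO
 * skb with gso_segs = 4, a 66-byte header (skb_headlen) and skb->len = 6000,
 * the accounting above charges bytecount = 3 * 66 + 6000 = 6198 bytes and
 * 4 packets, approximating what actually went on the wire after the
 * hardware replicated the headers for each segment.
 */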
/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
				  int *work_done, int work_to_do)
{
	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info, *next_buffer;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;
	u32 length, staterr;
	int cleaned_count = 0;
	bool cleaned = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		skb = buffer_info->skb;

		/* in the packet split case this is header only */
		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = 1;
		cleaned_count++;
		pci_unmap_single(pdev, buffer_info->dma,
				 adapter->rx_ps_bsize0,
				 PCI_DMA_FROMDEVICE);
		buffer_info->dma = 0;

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			e_dbg("%s: Packet Split buffers didn't pick up the "
			      "full packet\n", netdev->name);
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		length = le16_to_cpu(rx_desc->wb.middle.length0);

		if (!length) {
			e_dbg("%s: Last part of the packet spanning multiple "
			      "descriptors\n", netdev->name);
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		/* Good Receive */
		skb_put(skb, length);

		{
		/*
		 * this looks ugly, but it seems compiler issues make it
		 * more efficient than reusing j
		 */
		int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);

		/*
		 * page alloc/put takes too long and effects small packet
		 * throughput, so unsplit small packets and save the alloc/put
		 * only valid in softirq (napi) context to call kmap_*
		 */
		if (l1 && (l1 <= copybreak) &&
		    ((length + l1) <= adapter->rx_ps_bsize0)) {
			u8 *vaddr;

			ps_page = &buffer_info->ps_pages[0];

			/*
			 * there is no documentation about how to call
			 * kmap_atomic, so we can't hold the mapping
			 * very long
			 */
			pci_dma_sync_single_for_cpu(pdev, ps_page->dma,
						    PAGE_SIZE,
						    PCI_DMA_FROMDEVICE);
			vaddr = kmap_atomic(ps_page->page,
					    KM_SKB_DATA_SOFTIRQ);
			memcpy(skb_tail_pointer(skb), vaddr, l1);
			kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
			pci_dma_sync_single_for_device(pdev, ps_page->dma,
						       PAGE_SIZE,
						       PCI_DMA_FROMDEVICE);

			skb_put(skb, l1);
			goto copydone;
		} /* if */
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
			if (!length)
				break;

			ps_page = &buffer_info->ps_pages[j];
			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
			ps_page->dma = 0;
			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
			ps_page->page = NULL;
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}

copydone:
		total_rx_bytes += skb->len;
		total_rx_packets++;

		e1000_rx_checksum(adapter, staterr, le16_to_cpu(
			rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);

		if (rx_desc->wb.upper.header_status &
		    cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
			adapter->rx_hdr_split++;

		e1000_receive_skb(adapter, netdev, skb,
				  staterr, rx_desc->wb.middle.vlan);

next_desc:
		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
		buffer_info->skb = NULL;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;
	return cleaned;
}
/**
 * e1000_consume_page - helper function
 **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
			       u16 length)
{
	bi->page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
}
/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/

static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		++i;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/* errors is only valid for DD + EOP descriptors */
		if (unlikely((status & E1000_RXD_STAT_EOP) &&
		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
			/* recycle both page and skb */
			buffer_info->skb = skb;
			/* an error means any chain goes out the window
			 * too */
			if (rx_ring->rx_skb_top)
				dev_kfree_skb(rx_ring->rx_skb_top);
			rx_ring->rx_skb_top = NULL;
			goto next_desc;
		}

#define rxtop rx_ring->rx_skb_top
		if (!(status & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = skb;
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
						   0, length);
			} else {
				/* this is the middle of a chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->page, 0, length);
				/* re-use the skb, only consumed the page */
				buffer_info->skb = skb;
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->page, 0, length);
				/* re-use the current skb, we only consumed the
				 * page */
				buffer_info->skb = skb;
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				/* no chain, got EOP, this buf is the packet
				 * copybreak to save the put_page/alloc_page */
				if (length <= copybreak &&
				    skb_tailroom(skb) >= length) {
					u8 *vaddr;
					vaddr = kmap_atomic(buffer_info->page,
							KM_SKB_DATA_SOFTIRQ);
					memcpy(skb_tail_pointer(skb), vaddr,
					       length);
					kunmap_atomic(vaddr,
						      KM_SKB_DATA_SOFTIRQ);
					/* re-use the page, so don't erase
					 * buffer_info->page */
					skb_put(skb, length);
				} else {
					skb_fill_page_desc(skb, 0,
							   buffer_info->page, 0,
							   length);
					e1000_consume_page(buffer_info, skb,
							   length);
				}
			}
		}

		/* Receive Checksum Offload XXX recompute due to CRC strip? */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/* eth type trans needs skb->data to point to something */
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			e_err("pskb_may_pull failed.\n");
			dev_kfree_skb(skb);
			goto next_desc;
		}

		e1000_receive_skb(adapter, netdev, skb, status,
				  rx_desc->special);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;
	return cleaned;
}
/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 **/
static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
{
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int i, j;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->clean_rx == e1000_clean_rx_irq)
				pci_unmap_single(pdev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 PCI_DMA_FROMDEVICE);
			else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
				pci_unmap_page(pdev, buffer_info->dma,
					       PAGE_SIZE,
					       PCI_DMA_FROMDEVICE);
			else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
				pci_unmap_single(pdev, buffer_info->dma,
						 adapter->rx_ps_bsize0,
						 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (!ps_page->page)
				break;
			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
			ps_page->dma = 0;
			put_page(ps_page->page);
			ps_page->page = NULL;
		}
	}

	/* there also may be some cached data from a chained receive */
	if (rx_ring->rx_skb_top) {
		dev_kfree_skb(rx_ring->rx_skb_top);
		rx_ring->rx_skb_top = NULL;
	}

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}
/**
 * e1000_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr_msi(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	/*
	 * read ICR disables interrupts using IAM
	 */

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/*
		 * ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			e1000e_gig_downshift_workaround_ich8lan(hw);

		/*
		 * 80003ES2LAN workaround-- For packet buffer work-around on
		 * link down event; disable receives here in the ISR and reset
		 * adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    adapter->flags & FLAG_RX_NEEDS_RESTART) {
			/* disable receives */
			u32 rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RX_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__netif_rx_schedule(netdev, &adapter->napi);
	}

	return IRQ_HANDLED;
}
/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl, icr = er32(ICR);

	if (!icr)
		return IRQ_NONE;  /* Not our interrupt */

	/*
	 * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt
	 */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	/*
	 * Interrupt Auto-Mask...upon reading ICR,
	 * interrupts are masked.  No need for the
	 * IMC write
	 */

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/*
		 * ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			e1000e_gig_downshift_workaround_ich8lan(hw);

		/*
		 * 80003ES2LAN workaround--
		 * For packet buffer work-around on link down event;
		 * disable receives here in the ISR and
		 * reset adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
			/* disable receives */
			rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RX_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__netif_rx_schedule(netdev, &adapter->napi);
	}

	return IRQ_HANDLED;
}
/**
 * e1000_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int irq_flags = IRQF_SHARED;
	int err;

	if (!(adapter->flags & FLAG_MSI_TEST_FAILED)) {
		err = pci_enable_msi(adapter->pdev);
		if (!err) {
			adapter->flags |= FLAG_MSI_ENABLED;
			irq_flags = 0;
		}
	}

	err = request_irq(adapter->pdev->irq,
			  ((adapter->flags & FLAG_MSI_ENABLED) ?
				&e1000_intr_msi : &e1000_intr),
			  irq_flags, netdev->name, netdev);
	if (err) {
		if (adapter->flags & FLAG_MSI_ENABLED) {
			pci_disable_msi(adapter->pdev);
			adapter->flags &= ~FLAG_MSI_ENABLED;
		}
		e_err("Unable to allocate interrupt, Error: %d\n", err);
	}

	return err;
}
static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);
	if (adapter->flags & FLAG_MSI_ENABLED) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~FLAG_MSI_ENABLED;
	}
}
/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMC, ~0);
	e1e_flush();
	synchronize_irq(adapter->pdev->irq);
}
/**
 * e1000_irq_enable - Enable default interrupt generation settings
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMS, IMS_ENABLE_MASK);
	e1e_flush();
}
/**
 * e1000_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * e1000_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is open.
 **/
static void e1000_get_hw_control(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;
	u32 swsm;

	/* Let firmware know the driver has taken over */
	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
		ctrl_ext = er32(CTRL_EXT);
		ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
	}
}
/**
 * e1000_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * e1000_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is closed.
 **/
static void e1000_release_hw_control(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;
	u32 swsm;

	/* Let firmware take over control of h/w */
	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
		ctrl_ext = er32(CTRL_EXT);
		ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
	}
}
/**
 * e1000_alloc_ring_dma - allocate memory for a ring structure
 **/
static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
				struct e1000_ring *ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
					GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}
/**
 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	int err = -ENOMEM, size;

	size = sizeof(struct e1000_buffer) * tx_ring->count;
	tx_ring->buffer_info = vmalloc(size);
	if (!tx_ring->buffer_info)
		goto err;
	memset(tx_ring->buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	err = e1000_alloc_ring_dma(adapter, tx_ring);
	if (err)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	spin_lock_init(&adapter->tx_queue_lock);

	return 0;
err:
	vfree(tx_ring->buffer_info);
	e_err("Unable to allocate memory for the transmit descriptor ring\n");
	return err;
}
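
/*
 * Editorial note (worked example, not in the original source): with the
 * default of 256 descriptors at 16 bytes each, tx_ring->size is 4096 and
 * ALIGN(4096, 4096) leaves it unchanged; a 320-descriptor ring (5120 bytes)
 * would round up to 8192, so the ring always occupies whole 4K pages.
 */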
/**
 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
{
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	int i, size, desc_len, err = -ENOMEM;

	size = sizeof(struct e1000_buffer) * rx_ring->count;
	rx_ring->buffer_info = vmalloc(size);
	if (!rx_ring->buffer_info)
		goto err;
	memset(rx_ring->buffer_info, 0, size);

	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
						sizeof(struct e1000_ps_page),
						GFP_KERNEL);
		if (!buffer_info->ps_pages)
			goto err_pages;
	}

	desc_len = sizeof(union e1000_rx_desc_packet_split);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	err = e1000_alloc_ring_dma(adapter, rx_ring);
	if (err)
		goto err_pages;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->rx_skb_top = NULL;

	return 0;

err_pages:
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		kfree(buffer_info->ps_pages);
	}
err:
	vfree(rx_ring->buffer_info);
	e_err("Unable to allocate memory for the receive descriptor ring\n");
	return err;
}
/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 **/
static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		e1000_put_txbuf(adapter, buffer_info);
	}

	size = sizeof(struct e1000_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->head);
	writel(0, adapter->hw.hw_addr + tx_ring->tail);
}
/**
 * e1000e_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
void e1000e_free_tx_resources(struct e1000_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *tx_ring = adapter->tx_ring;

	e1000_clean_tx_ring(adapter);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);
	tx_ring->desc = NULL;
}
/**
 * e1000e_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
void e1000e_free_rx_resources(struct e1000_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	int i;

	e1000_clean_rx_ring(adapter);

	for (i = 0; i < rx_ring->count; i++) {
		kfree(rx_ring->buffer_info[i].ps_pages);
	}

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);
	rx_ring->desc = NULL;
}
/**
 * e1000_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 *      this functionality is controlled by the InterruptThrottleRate module
 *      parameter (see e1000_param.c)
 **/
static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
				     u16 itr_setting, int packets,
				     int bytes)
{
	unsigned int retval = itr_setting;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512)) {
			retval = low_latency;
		}
		break;
	case low_latency: /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000) {
				retval = bulk_latency;
			} else if ((packets < 10) || ((bytes/packets) > 1200)) {
				retval = bulk_latency;
			} else if ((packets > 35)) {
				retval = lowest_latency;
			}
		} else if (bytes/packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35) {
				retval = low_latency;
			}
		} else if (bytes < 6000) {
			retval = low_latency;
		}
		break;
	}

update_itr_done:
	return retval;
}
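
/*
 * Editorial note (worked example, not in the original source): an interval
 * with 40 packets totalling 60000 bytes gives bytes/packets = 1500; from
 * low_latency that trips the "(bytes/packets) > 1200" test and demotes the
 * ring to bulk_latency (~4000 ints/s), trading latency for fewer interrupts
 * under bulk traffic.
 */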
static void e1000_set_itr(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 current_itr;
	u32 new_itr = adapter->itr;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = 4000;
		goto set_itr_now;
	}

	adapter->tx_itr = e1000_update_itr(adapter,
				    adapter->tx_itr,
				    adapter->total_tx_packets,
				    adapter->total_tx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
		adapter->tx_itr = low_latency;

	adapter->rx_itr = e1000_update_itr(adapter,
				    adapter->rx_itr,
				    adapter->total_rx_packets,
				    adapter->total_rx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
		adapter->rx_itr = low_latency;

	current_itr = max(adapter->rx_itr, adapter->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 70000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 4000;
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != adapter->itr) {
		/*
		 * this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
		new_itr = new_itr > adapter->itr ?
			  min(adapter->itr + (new_itr >> 2), new_itr) :
			  new_itr;
		adapter->itr = new_itr;
		ew32(ITR, 1000000000 / (new_itr * 256));
	}
}
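
/*
 * Editorial note (worked example, not in the original source): the ITR
 * register is programmed in 256 ns units, so for new_itr = 20000
 * interrupts/s the write is 1000000000 / (20000 * 256) = 195, i.e. an
 * inter-interrupt gap of about 195 * 256 ns = 50 us, matching the
 * "50 usec aka 20000 ints/s" comment in e1000_update_itr().
 */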
/**
 * e1000_clean - NAPI Rx polling callback
 * @napi: struct associated with this polling callback
 * @budget: amount of packets driver is allowed to process this poll
 **/
static int e1000_clean(struct napi_struct *napi, int budget)
{
	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
	struct net_device *poll_dev = adapter->netdev;
	int tx_cleaned = 0, work_done = 0;

	/* Must NOT use netdev_priv macro here. */
	adapter = poll_dev->priv;

	/*
	 * e1000_clean is called per-cpu.  This lock protects
	 * tx_ring from being cleaned by multiple cpus
	 * simultaneously.  A failure obtaining the lock means
	 * tx_ring is currently being cleaned anyway.
	 */
	if (spin_trylock(&adapter->tx_queue_lock)) {
		tx_cleaned = e1000_clean_tx_irq(adapter);
		spin_unlock(&adapter->tx_queue_lock);
	}

	adapter->clean_rx(adapter, &work_done, budget);

	if (tx_cleaned)
		work_done = budget;

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		if (adapter->itr_setting & 3)
			e1000_set_itr(adapter);
		netif_rx_complete(poll_dev, napi);
		e1000_irq_enable(adapter);
	}

	return work_done;
}
static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	/* don't update vlan cookie if already programmed */
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    (vid == adapter->mng_vlan_id))
		return;
	/* add VID to filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	e1000e_write_vfta(hw, index, vfta);
}
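
/*
 * Editorial note (worked example, not in the original source): the VLAN
 * filter table is 128 32-bit words covering all 4096 VIDs.  For vid = 1234,
 * index = (1234 >> 5) & 0x7F = 38 and the bit set is
 * 1 << (1234 & 0x1F) = 1 << 18, so word 38, bit 18 admits that VLAN.
 */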
static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if (!test_bit(__E1000_DOWN, &adapter->state))
		e1000_irq_disable(adapter);
	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__E1000_DOWN, &adapter->state))
		e1000_irq_enable(adapter);

	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    (vid == adapter->mng_vlan_id)) {
		/* release control to f/w */
		e1000_release_hw_control(adapter);
		return;
	}

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000e_write_vfta(hw, index, vfta);
}
static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (!adapter->vlgrp)
		return;

	if (!vlan_group_get_device(adapter->vlgrp, vid)) {
		adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		if (adapter->hw.mng_cookie.status &
		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
			e1000_vlan_rx_add_vid(netdev, vid);
			adapter->mng_vlan_id = vid;
		}

		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
		    (vid != old_vid) &&
		    !vlan_group_get_device(adapter->vlgrp, old_vid))
			e1000_vlan_rx_kill_vid(netdev, old_vid);
	} else {
		adapter->mng_vlan_id = vid;
	}
}
static void e1000_vlan_rx_register(struct net_device *netdev,
				   struct vlan_group *grp)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;

	if (!test_bit(__E1000_DOWN, &adapter->state))
		e1000_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = er32(CTRL);
		ctrl |= E1000_CTRL_VME;
		ew32(CTRL, ctrl);

		if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
			/* enable VLAN receive filtering */
			rctl = er32(RCTL);
			rctl &= ~E1000_RCTL_CFIEN;
			ew32(RCTL, rctl);
			e1000_update_mng_vlan(adapter);
		}
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = er32(CTRL);
		ctrl &= ~E1000_CTRL_VME;
		ew32(CTRL, ctrl);

		if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
			if (adapter->mng_vlan_id !=
			    (u16)E1000_MNG_VLAN_NONE) {
				e1000_vlan_rx_kill_vid(netdev,
						       adapter->mng_vlan_id);
				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
			}
		}
	}

	if (!test_bit(__E1000_DOWN, &adapter->state))
		e1000_irq_enable(adapter);
}
static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
	u16 vid;

	e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (!adapter->vlgrp)
		return;

	for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
		if (!vlan_group_get_device(adapter->vlgrp, vid))
			continue;
		e1000_vlan_rx_add_vid(adapter->netdev, vid);
	}
}
static void e1000_init_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 manc, manc2h;

	if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
		return;

	manc = er32(MANC);

	/*
	 * enable receiving management packets to the host. this will probably
	 * generate destination unreachable messages from the host OS, but
	 * the packets will be handled on SMBUS
	 */
	manc |= E1000_MANC_EN_MNG2HOST;
	manc2h = er32(MANC2H);
#define E1000_MNG2HOST_PORT_623 (1 << 5)
#define E1000_MNG2HOST_PORT_664 (1 << 6)
	manc2h |= E1000_MNG2HOST_PORT_623;
	manc2h |= E1000_MNG2HOST_PORT_664;
	ew32(MANC2H, manc2h);
	ew32(MANC, manc);
}
/**
 * e1000_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void e1000_configure_tx(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	u64 tdba;
	u32 tdlen, tctl, tipg, tarc;
	u32 ipgr1, ipgr2;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	tdba = tx_ring->dma;
	tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
	ew32(TDBAL, (tdba & DMA_32BIT_MASK));
	ew32(TDBAH, (tdba >> 32));
	ew32(TDLEN, tdlen);
	ew32(TDH, 0);
	ew32(TDT, 0);
	tx_ring->head = E1000_TDH;
	tx_ring->tail = E1000_TDT;

	/* Set the default values for the Tx Inter Packet Gap timer */
	tipg = DEFAULT_82543_TIPG_IPGT_COPPER;		/*  8  */
	ipgr1 = DEFAULT_82543_TIPG_IPGR1;		/*  8  */
	ipgr2 = DEFAULT_82543_TIPG_IPGR2;		/*  6  */

	if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN)
		ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2;	/*  7  */

	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
	ew32(TIPG, tipg);

	/* Set the Tx Interrupt Delay register */
	ew32(TIDV, adapter->tx_int_delay);
	/* Tx irq moderation */
	ew32(TADV, adapter->tx_abs_int_delay);

	/* Program the Transmit Control Register */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
		tarc = er32(TARC(0));
		/*
		 * set the speed mode bit, we'll clear it if we're not at
		 * gigabit link later
		 */
#define SPEED_MODE_BIT (1 << 21)
		tarc |= SPEED_MODE_BIT;
		ew32(TARC(0), tarc);
	}

	/* errata: program both queues to unweighted RR */
	if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
		tarc = er32(TARC(0));
		tarc |= 1;
		ew32(TARC(0), tarc);
		tarc = er32(TARC(1));
		tarc |= 1;
		ew32(TARC(1), tarc);
	}

	e1000e_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;

	/* only set IDE if we are delaying interrupts using the timers */
	if (adapter->tx_int_delay)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	/* enable Report Status bit */
	adapter->txd_cmd |= E1000_TXD_CMD_RS;

	ew32(TCTL, tctl);

	adapter->tx_queue_len = adapter->netdev->tx_queue_len;
}
/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
			   (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl, rfctl;
	u32 psrctl = 0;
	u32 pages = 0;

	/* Program MC offset vector base */
	rctl = er32(RCTL);
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
		(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Do not Store bad packets */
	rctl &= ~E1000_RCTL_SBP;

	/* Enable Long Packet receive */
	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Enable hardware CRC frame stripping */
	rctl |= E1000_RCTL_SECRC;

	/* Setup buffer sizes */
	rctl &= ~E1000_RCTL_SZ_4096;
	rctl |= E1000_RCTL_BSEX;
	switch (adapter->rx_buffer_len) {
	case 256:
		rctl |= E1000_RCTL_SZ_256;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case 512:
		rctl |= E1000_RCTL_SZ_512;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case 1024:
		rctl |= E1000_RCTL_SZ_1024;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case 2048:
	default:
		rctl |= E1000_RCTL_SZ_2048;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case 4096:
		rctl |= E1000_RCTL_SZ_4096;
		break;
	case 8192:
		rctl |= E1000_RCTL_SZ_8192;
		break;
	case 16384:
		rctl |= E1000_RCTL_SZ_16384;
		break;
	}

	/*
	 * 82571 and greater support packet-split where the protocol
	 * header is placed in skb->data and the packet data is
	 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
	 * In the case of a non-split, skb->data is linearly filled,
	 * followed by the page buffers.  Therefore, skb->data is
	 * sized to hold the largest protocol header.
	 *
	 * allocations using alloc_page take too long for regular MTU
	 * so only enable packet split for jumbo frames
	 *
	 * Using pages when the page size is greater than 16k wastes
	 * a lot of memory, since we allocate 3 pages at all times
	 * per packet.
	 */
	pages = PAGE_USE_COUNT(adapter->netdev->mtu);
	if (!(adapter->flags & FLAG_IS_ICH) && (pages <= 3) &&
	    (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
		adapter->rx_ps_pages = pages;
	else
		adapter->rx_ps_pages = 0;

	if (adapter->rx_ps_pages) {
		/* Configure extra packet-split registers */
		rfctl = er32(RFCTL);
		rfctl |= E1000_RFCTL_EXTEN;
		/*
		 * disable packet split support for IPv6 extension headers,
		 * because some malformed IPv6 headers can hang the Rx
		 */
		rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
			  E1000_RFCTL_NEW_IPV6_EXT_DIS);

		ew32(RFCTL, rfctl);

		/* Enable Packet split descriptors */
		rctl |= E1000_RCTL_DTYP_PS;

		psrctl |= adapter->rx_ps_bsize0 >>
			E1000_PSRCTL_BSIZE0_SHIFT;

		switch (adapter->rx_ps_pages) {
		case 3:
			psrctl |= PAGE_SIZE <<
				E1000_PSRCTL_BSIZE3_SHIFT;
		case 2:
			psrctl |= PAGE_SIZE <<
				E1000_PSRCTL_BSIZE2_SHIFT;
		case 1:
			psrctl |= PAGE_SIZE >>
				E1000_PSRCTL_BSIZE1_SHIFT;
			break;
		}

		ew32(PSRCTL, psrctl);
	}

	ew32(RCTL, rctl);
	/* just started the receive unit, no need to restart */
	adapter->flags &= ~FLAG_RX_RESTART_NOW;
}
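
/*
 * Editorial note (worked example, not in the original source):
 * PAGE_USE_COUNT rounds a byte count up to whole pages.  With 4K pages, an
 * MTU of 9000 gives (9000 >> 12) + 1 = 3 pages, which still satisfies the
 * "pages <= 3" test above, so packet split stays enabled for common
 * jumbo-frame sizes on non-ICH parts.
 */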
/**
 * e1000_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void e1000_configure_rx(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	u64 rdba;
	u32 rdlen, rctl, rxcsum, ctrl_ext;

	if (adapter->rx_ps_pages) {
		/* this is a 32 byte descriptor */
		rdlen = rx_ring->count *
			sizeof(union e1000_rx_desc_packet_split);
		adapter->clean_rx = e1000_clean_rx_irq_ps;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
	} else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
		rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
	} else {
		rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
	}

	/* disable receives while setting up the descriptors */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	e1e_flush();
	msleep(10);

	/* set the Receive Delay Timer Register */
	ew32(RDTR, adapter->rx_int_delay);

	/* irq moderation */
	ew32(RADV, adapter->rx_abs_int_delay);
	if (adapter->itr_setting != 0)
		ew32(ITR, 1000000000 / (adapter->itr * 256));

	ctrl_ext = er32(CTRL_EXT);
	/* Reset delay timers after every interrupt */
	ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
	/* Auto-Mask interrupts upon ICR access */
	ctrl_ext |= E1000_CTRL_EXT_IAME;
	ew32(IAM, 0xffffffff);
	ew32(CTRL_EXT, ctrl_ext);
	e1e_flush();

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	rdba = rx_ring->dma;
	ew32(RDBAL, (rdba & DMA_32BIT_MASK));
	ew32(RDBAH, (rdba >> 32));
	ew32(RDLEN, rdlen);
	ew32(RDH, 0);
	ew32(RDT, 0);
	rx_ring->head = E1000_RDH;
	rx_ring->tail = E1000_RDT;

	/* Enable Receive Checksum Offload for TCP and UDP */
	rxcsum = er32(RXCSUM);
	if (adapter->flags & FLAG_RX_CSUM_ENABLED) {
		rxcsum |= E1000_RXCSUM_TUOFL;

		/*
		 * IPv4 payload checksum for UDP fragments must be
		 * used in conjunction with packet-split.
		 */
		if (adapter->rx_ps_pages)
			rxcsum |= E1000_RXCSUM_IPPCSE;
	} else {
		rxcsum &= ~E1000_RXCSUM_TUOFL;
		/* no need to clear IPPCSE as it defaults to 0 */
	}
	ew32(RXCSUM, rxcsum);

	/*
	 * Enable early receives on supported devices, only takes effect when
	 * packet size is equal or larger than the specified value (in 8 byte
	 * units), e.g. using jumbo frames when setting to E1000_ERT_2048
	 */
	if ((adapter->flags & FLAG_HAS_ERT) &&
	    (adapter->netdev->mtu > ETH_DATA_LEN)) {
		u32 rxdctl = er32(RXDCTL(0));
		ew32(RXDCTL(0), rxdctl | 0x3);
		ew32(ERT, E1000_ERT_2048 | (1 << 13));
		/*
		 * With jumbo frames and early-receive enabled, excessive
		 * C4->C2 latencies result in dropped transactions.
		 */
		pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
					  e1000e_driver_name, 55);
	} else {
		pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
					  e1000e_driver_name,
					  PM_QOS_DEFAULT_VALUE);
	}

	/* Enable Receives */
	ew32(RCTL, rctl);
}
/**
 * e1000_update_mc_addr_list - Update Multicast addresses
 * @hw: pointer to the HW structure
 * @mc_addr_list: array of multicast addresses to program
 * @mc_addr_count: number of multicast addresses to program
 * @rar_used_count: the first RAR register free to program
 * @rar_count: total number of supported Receive Address Registers
 *
 * Updates the Receive Address Registers and Multicast Table Array.
 * The caller must have a packed mc_addr_list of multicast addresses.
 * The parameter rar_count will usually be hw->mac.rar_entry_count
 * unless there are workarounds that change this.  Currently no func pointer
 * exists and all implementations are handled in the generic version of this
 * function.
 **/
static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
				      u32 mc_addr_count, u32 rar_used_count,
				      u32 rar_count)
{
	hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
					rar_used_count, rar_count);
}
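
/*
 * Illustrative note (added commentary, not from the original source):
 * the generic update_mc_addr_list implementation places as many
 * addresses as fit into the spare Receive Address Registers (exact
 * match filters) and hashes the remainder into the Multicast Table
 * Array, so a long multicast list degrades gracefully into hash-based
 * (imperfect) filtering rather than failing.
 */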
/**
 * e1000_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void e1000_set_multi(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct dev_mc_list *mc_ptr;
	u8 *mta_list;
	u32 rctl;
	int i;

	/* Check for Promiscuous and All Multicast modes */

	rctl = er32(RCTL);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		rctl &= ~E1000_RCTL_VFE;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= E1000_RCTL_MPE;
			rctl &= ~E1000_RCTL_UPE;
		} else {
			rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
		}
		if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
			rctl |= E1000_RCTL_VFE;
	}

	ew32(RCTL, rctl);

	if (netdev->mc_count) {
		mta_list = kmalloc(netdev->mc_count * 6, GFP_ATOMIC);
		if (!mta_list)
			return;

		/* prepare a packed array of only addresses. */
		mc_ptr = netdev->mc_list;

		for (i = 0; i < netdev->mc_count; i++) {
			if (!mc_ptr)
				break;
			memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr,
			       ETH_ALEN);
			mc_ptr = mc_ptr->next;
		}

		e1000_update_mc_addr_list(hw, mta_list, i, 1,
					  mac->rar_entry_count);
		kfree(mta_list);
	} else {
		/*
		 * if we're called from probe, we might not have
		 * anything to do here, so clear out the list
		 */
		e1000_update_mc_addr_list(hw, NULL, 0, 1, mac->rar_entry_count);
	}
}
/**
 * e1000_configure - configure the hardware for Rx and Tx
 * @adapter: private board structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	e1000_set_multi(adapter->netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring));
}
/**
 * e1000e_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000e_reset ***
 **/
void e1000e_power_up_phy(struct e1000_adapter *adapter)
{
	u16 mii_reg = 0;

	/* Just clear the power down bit to wake the phy back up */
	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
		/*
		 * According to the manual, the phy will retain its
		 * settings across a power-down/up cycle
		 */
		e1e_rphy(&adapter->hw, PHY_CONTROL, &mii_reg);
		mii_reg &= ~MII_CR_POWER_DOWN;
		e1e_wphy(&adapter->hw, PHY_CONTROL, mii_reg);
	}

	adapter->hw.mac.ops.setup_link(&adapter->hw);
}
/**
 * e1000_power_down_phy - Power down the PHY
 *
 * Power down the PHY so no link is implied when interface is down.
 * The PHY cannot be powered down if management or WoL is active.
 */
static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 mii_reg;

	/* WoL is enabled */
	if (adapter->wol)
		return;

	/* non-copper PHY? */
	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return;

	/* reset is blocked because of a SoL/IDER session */
	if (e1000e_check_mng_mode(hw) || e1000_check_reset_block(hw))
		return;

	/* manageability (AMT) is enabled */
	if (er32(MANC) & E1000_MANC_SMBUS_EN)
		return;

	/* power down the PHY */
	e1e_rphy(hw, PHY_CONTROL, &mii_reg);
	mii_reg |= MII_CR_POWER_DOWN;
	e1e_wphy(hw, PHY_CONTROL, mii_reg);
	mdelay(1);
}
/**
 * e1000e_reset - bring the hardware into a known good state
 *
 * This function boots the hardware and enables some settings that
 * require a configuration cycle of the hardware - those cannot be
 * set/changed during runtime. After reset the device needs to be
 * properly configured for Rx, Tx etc.
 */
void e1000e_reset(struct e1000_adapter *adapter)
{
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct e1000_fc_info *fc = &adapter->hw.fc;
	struct e1000_hw *hw = &adapter->hw;
	u32 tx_space, min_tx_space, min_rx_space;
	u32 pba = adapter->pba;
	u16 hwm;

	/* reset Packet Buffer Allocation to default */
	ew32(PBA, pba);

	if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/*
		 * To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB. Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB.
		 */
		pba = er32(PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/*
		 * the Tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it
		 */
		min_tx_space = (adapter->max_frame_size +
				sizeof(struct e1000_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/*
		 * If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation
		 */
		if ((tx_space < min_tx_space) &&
		    ((min_tx_space - tx_space) < pba)) {
			pba -= min_tx_space - tx_space;

			/*
			 * if short on Rx space, Rx wins and must trump tx
			 * adjustment or use Early Receive if available
			 */
			if ((pba < min_rx_space) &&
			    (!(adapter->flags & FLAG_HAS_ERT)))
				/* ERT enabled in e1000_configure_rx */
				pba = min_rx_space;
		}

		ew32(PBA, pba);
	}


	/*
	 * flow control settings
	 *
	 * The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus the early receive size (for parts
	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
	 * - the full Rx FIFO size minus one full frame
	 */
	if (adapter->flags & FLAG_HAS_ERT)
		hwm = min(((pba << 10) * 9 / 10),
			  ((pba << 10) - (E1000_ERT_2048 << 3)));
	else
		hwm = min(((pba << 10) * 9 / 10),
			  ((pba << 10) - adapter->max_frame_size));

	fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */
	fc->low_water = fc->high_water - 8;

	if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
		fc->pause_time = 0xFFFF;
	else
		fc->pause_time = E1000_FC_PAUSE_TIME;
	fc->send_xon = 1;
	fc->type = fc->original_type;

	/* Allow time for pending master requests to run */
	mac->ops.reset_hw(hw);

	/*
	 * For parts with AMT enabled, let the firmware know
	 * that the network interface is in control
	 */
	if (adapter->flags & FLAG_HAS_AMT)
		e1000_get_hw_control(adapter);

	ew32(WUC, 0);

	if (mac->ops.init_hw(hw))
		e_err("Hardware Error\n");

	e1000_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETH_P_8021Q);

	e1000e_reset_adaptive(hw);
	e1000_get_phy_info(hw);

	if (!(adapter->flags & FLAG_SMART_POWER_DOWN)) {
		u16 phy_data = 0;
		/*
		 * speed up time to link by disabling smart power down, ignore
		 * the return value of this function because there is nothing
		 * different we would do if it failed
		 */
		e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
		phy_data &= ~IGP02E1000_PM_SPD;
		e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
	}
}
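
/*
 * Worked example (added commentary, not from the original source) for
 * the flow-control water marks above, assuming a part without ERT, a
 * 20 KB Rx FIFO (pba = 20) and max_frame_size = 1522:
 *
 *	pba << 10                 = 20480 bytes
 *	(pba << 10) * 9 / 10      = 18432 bytes
 *	(pba << 10) - 1522        = 18958 bytes
 *	hwm = min(18432, 18958)   = 18432
 *	fc->high_water            = 18432 & 0xFFF8 = 18432
 *	fc->low_water             = 18424
 *
 * i.e. a PAUSE frame is sent once less than one full frame of space
 * remains above the high water mark.
 */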
int e1000e_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->state);

	napi_enable(&adapter->napi);
	e1000_irq_enable(adapter);

	/* fire a link change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);

	return 0;
}
void e1000e_down(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;

	/*
	 * signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer
	 */
	set_bit(__E1000_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
	/* flush both disables and wait for them to finish */
	e1e_flush();
	msleep(10);

	napi_disable(&adapter->napi);
	e1000_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);
	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		e1000e_reset(adapter);
	e1000_clean_tx_ring(adapter);
	e1000_clean_rx_ring(adapter);

	/*
	 * TODO: for power management, we could drop the link and
	 * pci_disable_device here.
	 */
}
void e1000e_reinit_locked(struct e1000_adapter *adapter)
{
	might_sleep();
	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
		msleep(1);
	e1000e_down(adapter);
	e1000e_up(adapter);
	clear_bit(__E1000_RESETTING, &adapter->state);
}
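
/*
 * Illustrative usage note (added commentary, not from the original
 * source): any path that needs a full down/up cycle goes through
 * e1000e_reinit_locked() so that concurrent resets (e.g. the reset
 * task racing an MTU change) serialize on the __E1000_RESETTING bit
 * instead of on a mutex, which keeps the wait safe from contexts that
 * can sleep but must not block on long-held locks.
 */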
/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
	adapter->rx_ps_bsize0 = 128;
	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err;

	adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err;

	spin_lock_init(&adapter->tx_queue_lock);

	/* Explicitly disable IRQ since the NIC can be in any state. */
	e1000_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);

	set_bit(__E1000_DOWN, &adapter->state);
	return 0;

err:
	e_err("Unable to allocate memory for queues\n");
	kfree(adapter->rx_ring);
	kfree(adapter->tx_ring);
	return -ENOMEM;
}
/**
 * e1000_intr_msi_test - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr_msi_test(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	e_dbg("%s: icr is %08X\n", netdev->name, icr);
	if (icr & E1000_ICR_RXSEQ) {
		adapter->flags &= ~FLAG_MSI_TEST_FAILED;
		wmb();
	}

	return IRQ_HANDLED;
}
/**
 * e1000_test_msi_interrupt - Returns 0 for successful test
 * @adapter: board private struct
 *
 * code flow taken from tg3.c
 **/
static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* poll_enable hasn't been called yet, so don't need disable */
	/* clear any pending events */
	er32(ICR);

	/* free the real vector and request a test handler */
	e1000_free_irq(adapter);

	/* Assume that the test fails, if it succeeds then the test
	 * MSI irq handler will unset this flag */
	adapter->flags |= FLAG_MSI_TEST_FAILED;

	err = pci_enable_msi(adapter->pdev);
	if (err)
		goto msi_test_failed;

	err = request_irq(adapter->pdev->irq, &e1000_intr_msi_test, 0,
			  netdev->name, netdev);
	if (err) {
		pci_disable_msi(adapter->pdev);
		goto msi_test_failed;
	}

	wmb();

	e1000_irq_enable(adapter);

	/* fire an unusual interrupt on the test handler */
	ew32(ICS, E1000_ICS_RXSEQ);
	e1e_flush();
	msleep(50);

	e1000_irq_disable(adapter);

	rmb();

	if (adapter->flags & FLAG_MSI_TEST_FAILED) {
		err = -EIO;
		e_info("MSI interrupt test failed!\n");
	}

	free_irq(adapter->pdev->irq, netdev);
	pci_disable_msi(adapter->pdev);

	if (err == -EIO)
		goto msi_test_failed;

	/* okay so the test worked, restore settings */
	e_dbg("%s: MSI interrupt test succeeded!\n", netdev->name);
msi_test_failed:
	/* restore the original vector, even if it failed */
	e1000_request_irq(adapter);
	return err;
}
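
/*
 * Illustrative note (added commentary, not from the original source):
 * the MSI self-test is a simple flag handshake.  FLAG_MSI_TEST_FAILED
 * is set pessimistically, a software-triggered interrupt
 * (E1000_ICS_RXSEQ) is fired, and only the test handler above clears
 * the flag; if the flag is still set after the 50 ms wait, the MSI
 * write never reached the CPU and the caller falls back to legacy
 * INTx.
 */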
/**
 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
 * @adapter: board private struct
 *
 * code flow taken from tg3.c, called with e1000 interrupts disabled.
 **/
static int e1000_test_msi(struct e1000_adapter *adapter)
{
	int err;
	u16 pci_cmd;

	if (!(adapter->flags & FLAG_MSI_ENABLED))
		return 0;

	/* disable SERR in case the MSI write causes a master abort */
	pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(adapter->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = e1000_test_msi_interrupt(adapter);

	/* restore previous setting of command word */
	pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);

	/* success ! */
	if (!err)
		return 0;

	/* EIO means MSI test failed */
	if (err != -EIO)
		return err;

	/* back to INTx mode */
	e_warn("MSI interrupt test failed, using legacy interrupt.\n");

	e1000_free_irq(adapter);

	err = e1000_request_irq(adapter);

	return err;
}
/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_TESTING, &adapter->state))
		return -EBUSY;

	/* allocate transmit descriptors */
	err = e1000e_setup_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = e1000e_setup_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	e1000e_power_up_phy(adapter);

	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
		e1000_update_mng_vlan(adapter);

	/*
	 * If AMT is enabled, let the firmware know that the network
	 * interface is now open
	 */
	if (adapter->flags & FLAG_HAS_AMT)
		e1000_get_hw_control(adapter);

	/*
	 * before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.
	 */
	e1000_configure(adapter);

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/*
	 * Work around PCIe errata with MSI interrupts causing some chipsets to
	 * ignore e1000e MSI messages, which means we need to test our MSI
	 * interrupt now
	 */
	err = e1000_test_msi(adapter);
	if (err) {
		e_err("Interrupt allocation failed\n");
		goto err_req_irq;
	}

	/* From here on the code is the same as e1000e_up() */
	clear_bit(__E1000_DOWN, &adapter->state);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_tx_start_all_queues(netdev);

	/* fire a link status change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);

	return 0;

err_req_irq:
	e1000_release_hw_control(adapter);
	e1000_power_down_phy(adapter);
	e1000e_free_rx_resources(adapter);
err_setup_rx:
	e1000e_free_tx_resources(adapter);
err_setup_tx:
	e1000e_reset(adapter);

	return err;
}
/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
	e1000e_down(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);

	e1000e_free_tx_resources(adapter);
	e1000e_free_rx_resources(adapter);

	/*
	 * kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it)
	 */
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    !(adapter->vlgrp &&
	      vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);

	/*
	 * If AMT is enabled, let the firmware know that the network
	 * interface is now closed
	 */
	if (adapter->flags & FLAG_HAS_AMT)
		e1000_release_hw_control(adapter);

	return 0;
}
/**
 * e1000_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_set_mac(struct net_device *netdev, void *p)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);

	e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);

	if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
		/* activate the work around */
		e1000e_set_laa_state_82571(&adapter->hw, 1);

		/*
		 * Hold a copy of the LAA in RAR[14].  This is done so that
		 * between the time RAR[0] gets clobbered and the time it
		 * gets fixed (in e1000_watchdog), the actual LAA is in one
		 * of the RARs and no incoming packets directed to this port
		 * are dropped.  Eventually the LAA will be in RAR[0] and
		 * RAR[14]
		 */
		e1000e_rar_set(&adapter->hw,
			       adapter->hw.mac.addr,
			       adapter->hw.mac.rar_entry_count - 1);
	}

	return 0;
}
/*
 * Need to wait a few seconds after link up to get diagnostic information from
 * the phy
 */
static void e1000_update_phy_info(unsigned long data)
{
	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
	e1000_get_phy_info(&adapter->hw);
}
/**
 * e1000e_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void e1000e_update_stats(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long irq_flags;
	u16 phy_tmp;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	spin_lock_irqsave(&adapter->stats_lock, irq_flags);

	/*
	 * these counters are modified from e1000_adjust_tbi_stats,
	 * called from the interrupt context, so they must only
	 * be written while holding adapter->stats_lock
	 */

	adapter->stats.crcerrs += er32(CRCERRS);
	adapter->stats.gprc += er32(GPRC);
	adapter->stats.gorc += er32(GORCL);
	er32(GORCH); /* Clear gorc */
	adapter->stats.bprc += er32(BPRC);
	adapter->stats.mprc += er32(MPRC);
	adapter->stats.roc += er32(ROC);

	adapter->stats.mpc += er32(MPC);
	adapter->stats.scc += er32(SCC);
	adapter->stats.ecol += er32(ECOL);
	adapter->stats.mcc += er32(MCC);
	adapter->stats.latecol += er32(LATECOL);
	adapter->stats.dc += er32(DC);
	adapter->stats.xonrxc += er32(XONRXC);
	adapter->stats.xontxc += er32(XONTXC);
	adapter->stats.xoffrxc += er32(XOFFRXC);
	adapter->stats.xofftxc += er32(XOFFTXC);
	adapter->stats.gptc += er32(GPTC);
	adapter->stats.gotc += er32(GOTCL);
	er32(GOTCH); /* Clear gotc */
	adapter->stats.rnbc += er32(RNBC);
	adapter->stats.ruc += er32(RUC);

	adapter->stats.mptc += er32(MPTC);
	adapter->stats.bptc += er32(BPTC);

	/* used for adaptive IFS */

	hw->mac.tx_packet_delta = er32(TPT);
	adapter->stats.tpt += hw->mac.tx_packet_delta;
	hw->mac.collision_delta = er32(COLC);
	adapter->stats.colc += hw->mac.collision_delta;

	adapter->stats.algnerrc += er32(ALGNERRC);
	adapter->stats.rxerrc += er32(RXERRC);
	adapter->stats.tncrs += er32(TNCRS);
	adapter->stats.cexterr += er32(CEXTERR);
	adapter->stats.tsctc += er32(TSCTC);
	adapter->stats.tsctfc += er32(TSCTFC);

	/* Fill out the OS statistics structure */
	adapter->net_stats.multicast = adapter->stats.mprc;
	adapter->net_stats.collisions = adapter->stats.colc;

	/* Rx Errors */

	/*
	 * RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC
	 */
	adapter->net_stats.rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	adapter->net_stats.rx_length_errors = adapter->stats.ruc +
					      adapter->stats.roc;
	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
	adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
	adapter->net_stats.rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	adapter->net_stats.tx_errors = adapter->stats.ecol +
				       adapter->stats.latecol;
	adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
	adapter->net_stats.tx_window_errors = adapter->stats.latecol;
	adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->phy.media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		    (!e1e_rphy(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}
	}

	/* Management Stats */
	adapter->stats.mgptc += er32(MGTPTC);
	adapter->stats.mgprc += er32(MGTPRC);
	adapter->stats.mgpdc += er32(MGTPDC);

	spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
}
/**
 * e1000_phy_read_status - Update the PHY register status snapshot
 * @adapter: board private structure
 **/
static void e1000_phy_read_status(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_phy_regs *phy = &adapter->phy_regs;
	int ret_val;
	unsigned long irq_flags;


	spin_lock_irqsave(&adapter->stats_lock, irq_flags);

	if ((er32(STATUS) & E1000_STATUS_LU) &&
	    (adapter->hw.phy.media_type == e1000_media_type_copper)) {
		ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
		ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
		ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
		ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa);
		ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion);
		ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000);
		ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);
		ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
		if (ret_val)
			e_warn("Error reading PHY register\n");
	} else {
		/*
		 * Do not read PHY registers if link is not up
		 * Set values to typical power-on defaults
		 */
		phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
		phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
			     BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
			     BMSR_ERCAP);
		phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
				  ADVERTISE_ALL | ADVERTISE_CSMA);
		phy->lpa = 0;
		phy->expansion = EXPANSION_ENABLENPAGE;
		phy->ctrl1000 = ADVERTISE_1000FULL;
		phy->stat1000 = 0;
		phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
	}

	spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
}
static void e1000_print_link_info(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl = er32(CTRL);

	e_info("Link is Up %d Mbps %s, Flow Control: %s\n",
	       adapter->link_speed,
	       (adapter->link_duplex == FULL_DUPLEX) ?
	       "Full Duplex" : "Half Duplex",
	       ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
	       "RX/TX" :
	       ((ctrl & E1000_CTRL_RFCE) ? "RX" :
	       ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
}
static bool e1000_has_link(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = 0;
	s32 ret_val = 0;

	/*
	 * get_link_status is set on LSC (link status) interrupt or
	 * Rx sequence error interrupt.  get_link_status will stay
	 * false until the check_for_link establishes link
	 * for copper adapters ONLY
	 */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			ret_val = hw->mac.ops.check_for_link(hw);
			link_active = !hw->mac.get_link_status;
		} else {
			link_active = 1;
		}
		break;
	case e1000_media_type_fiber:
		ret_val = hw->mac.ops.check_for_link(hw);
		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
		break;
	case e1000_media_type_internal_serdes:
		ret_val = hw->mac.ops.check_for_link(hw);
		link_active = adapter->hw.mac.serdes_has_link;
		break;
	case e1000_media_type_unknown:
	default:
		break;
	}

	if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
	    (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
		/* See e1000_kmrn_lock_loss_workaround_ich8lan() */
		e_info("Gigabit has been disabled, downgrading speed\n");
	}

	return link_active;
}
static void e1000e_enable_receives(struct e1000_adapter *adapter)
{
	/* make sure the receive unit is started */
	if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
	    (adapter->flags & FLAG_RX_RESTART_NOW)) {
		struct e1000_hw *hw = &adapter->hw;
		u32 rctl = er32(RCTL);
		ew32(RCTL, rctl | E1000_RCTL_EN);
		adapter->flags &= ~FLAG_RX_RESTART_NOW;
	}
}
/**
 * e1000_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void e1000_watchdog(unsigned long data)
{
	struct e1000_adapter *adapter = (struct e1000_adapter *) data;

	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);

	/* TODO: make this use queue_delayed_work() */
}
static void e1000_watchdog_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
					struct e1000_adapter, watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_hw *hw = &adapter->hw;
	u32 link, tctl;
	int tx_pending = 0;

	link = e1000_has_link(adapter);
	if ((netif_carrier_ok(netdev)) && link) {
		e1000e_enable_receives(adapter);
		goto link_up;
	}

	if ((e1000e_enable_tx_pkt_filtering(hw)) &&
	    (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
		e1000_update_mng_vlan(adapter);

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			bool txb2b = 1;
			/* update snapshot of PHY registers on LSC */
			e1000_phy_read_status(adapter);
			mac->ops.get_link_up_info(&adapter->hw,
						  &adapter->link_speed,
						  &adapter->link_duplex);
			e1000_print_link_info(adapter);
			/*
			 * tweak tx_queue_len according to speed/duplex
			 * and adjust the timeout factor
			 */
			netdev->tx_queue_len = adapter->tx_queue_len;
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				txb2b = 0;
				netdev->tx_queue_len = 10;
				adapter->tx_timeout_factor = 16;
				break;
			case SPEED_100:
				txb2b = 0;
				netdev->tx_queue_len = 100;
				/* maybe add some timeout factor ? */
				break;
			}

			/*
			 * workaround: re-program speed mode bit after
			 * link-up event
			 */
			if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
			    !txb2b) {
				u32 tarc0;
				tarc0 = er32(TARC(0));
				tarc0 &= ~SPEED_MODE_BIT;
				ew32(TARC(0), tarc0);
			}

			/*
			 * disable TSO for pcie and 10/100 speeds, to avoid
			 * some hardware issues
			 */
			if (!(adapter->flags & FLAG_TSO_FORCE)) {
				switch (adapter->link_speed) {
				case SPEED_10:
				case SPEED_100:
					e_info("10/100 speed: disabling TSO\n");
					netdev->features &= ~NETIF_F_TSO;
					netdev->features &= ~NETIF_F_TSO6;
					break;
				case SPEED_1000:
					netdev->features |= NETIF_F_TSO;
					netdev->features |= NETIF_F_TSO6;
					break;
				default:
					/* oops */
					break;
				}
			}

			/*
			 * enable transmits in the hardware, need to do this
			 * after setting TARC(0)
			 */
			tctl = er32(TCTL);
			tctl |= E1000_TCTL_EN;
			ew32(TCTL, tctl);

			netif_carrier_on(netdev);
			netif_tx_wake_all_queues(netdev);

			if (!test_bit(__E1000_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			e_info("Link is Down\n");
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
			if (!test_bit(__E1000_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));

			if (adapter->flags & FLAG_RX_NEEDS_RESTART)
				schedule_work(&adapter->reset_task);
		}
	}

link_up:
	e1000e_update_stats(adapter);

	mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
	adapter->tpt_old = adapter->stats.tpt;
	mac->collision_delta = adapter->stats.colc - adapter->colc_old;
	adapter->colc_old = adapter->stats.colc;

	adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
	adapter->gorc_old = adapter->stats.gorc;
	adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
	adapter->gotc_old = adapter->stats.gotc;

	e1000e_update_adaptive(&adapter->hw);

	if (!netif_carrier_ok(netdev)) {
		tx_pending = (e1000_desc_unused(tx_ring) + 1 <
			      tx_ring->count);
		if (tx_pending) {
			/*
			 * We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
		}
	}

	/* Cause software interrupt to ensure Rx ring is cleaned */
	ew32(ICS, E1000_ICS_RXDMT0);

	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = 1;

	/*
	 * With 82571 controllers, LAA may be overwritten due to controller
	 * reset from the other port. Set the appropriate LAA in RAR[0]
	 */
	if (e1000e_get_laa_state_82571(hw))
		e1000e_rar_set(hw, adapter->hw.mac.addr, 0);

	/* Reset the timer */
	if (!test_bit(__E1000_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 2 * HZ));
}
#define E1000_TX_FLAGS_CSUM		0x00000001
#define E1000_TX_FLAGS_VLAN		0x00000002
#define E1000_TX_FLAGS_TSO		0x00000004
#define E1000_TX_FLAGS_IPV4		0x00000008
#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT	16
static int e1000_tso(struct e1000_adapter *adapter,
		     struct sk_buff *skb)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_context_desc *context_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i;
	u32 cmd_length = 0;
	u16 ipcse = 0, tucse, mss;
	u8 ipcss, ipcso, tucss, tucso, hdr_len;
	int err;

	if (skb_is_gso(skb)) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		mss = skb_shinfo(skb)->gso_size;
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			cmd_length = E1000_TXD_CMD_IP;
			ipcse = skb_transport_offset(skb) - 1;
		} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0);
			ipcse = 0;
		}
		ipcss = skb_network_offset(skb);
		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
		tucss = skb_transport_offset(skb);
		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
		tucse = 0;

		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));

		i = tx_ring->next_to_use;
		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];

		context_desc->lower_setup.ip_fields.ipcss  = ipcss;
		context_desc->lower_setup.ip_fields.ipcso  = ipcso;
		context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
		context_desc->upper_setup.tcp_fields.tucss = tucss;
		context_desc->upper_setup.tcp_fields.tucso = tucso;
		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
		context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
		context_desc->cmd_and_length = cpu_to_le32(cmd_length);

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return 1;
	}

	return 0;
}
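
/*
 * Worked example (added commentary, not from the original source),
 * assuming an untagged Ethernet frame carrying IPv4 + TCP with no
 * options:
 *
 *	ipcss = 14		(IP header follows the 14-byte Ethernet
 *				 header)
 *	ipcse = 34 - 1 = 33	(last byte of the 20-byte IP header)
 *	tucss = 34		(TCP header offset)
 *	tucso = 34 + 16 = 50	(TCP checksum field offset)
 *
 * The hardware combines these offsets with the mss/hdr_len fields of
 * the context descriptor to checksum and segment each derived packet.
 */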
static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_context_desc *context_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i;
	u8 css;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		css = skb_transport_offset(skb);

		i = tx_ring->next_to_use;
		buffer_info = &tx_ring->buffer_info[i];
		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);

		context_desc->lower_setup.ip_config = 0;
		context_desc->upper_setup.tcp_fields.tucss = css;
		context_desc->upper_setup.tcp_fields.tucso =
					css + skb->csum_offset;
		context_desc->upper_setup.tcp_fields.tucse = 0;
		context_desc->tcp_seg_setup.data = 0;
		context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return 1;
	}

	return 0;
}
#define E1000_MAX_PER_TXD	8192
#define E1000_MAX_TXD_PWR	12

static int e1000_tx_map(struct e1000_adapter *adapter,
			struct sk_buff *skb, unsigned int first,
			unsigned int max_per_txd, unsigned int nr_frags,
			unsigned int mss)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_buffer *buffer_info;
	unsigned int len = skb->len - skb->data_len;
	unsigned int offset = 0, size, count = 0, i;
	unsigned int f;

	i = tx_ring->next_to_use;

	while (len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, max_per_txd);

		/* Workaround for premature desc write-backs
		 * in TSO mode.  Append 4-byte sentinel desc */
		if (mss && !nr_frags && size == len && size > 8)
			size -= 4;

		buffer_info->length = size;
		/* set time_stamp *before* dma to help avoid a possible race */
		buffer_info->time_stamp = jiffies;
		buffer_info->dma =
			pci_map_single(adapter->pdev,
				       skb->data + offset,
				       size, PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(adapter->pdev, buffer_info->dma)) {
			dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
			adapter->tx_dma_failed++;
			return -1;
		}
		buffer_info->next_to_watch = i;

		len -= size;
		offset += size;
		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;
		offset = frag->page_offset;

		while (len) {
			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, max_per_txd);
			/* Workaround for premature desc write-backs
			 * in TSO mode.  Append 4-byte sentinel desc */
			if (mss && f == (nr_frags-1) && size == len && size > 8)
				size -= 4;

			buffer_info->length = size;
			buffer_info->time_stamp = jiffies;
			buffer_info->dma =
				pci_map_page(adapter->pdev,
					     frag->page,
					     offset,
					     size,
					     PCI_DMA_TODEVICE);
			if (pci_dma_mapping_error(adapter->pdev,
						  buffer_info->dma)) {
				dev_err(&adapter->pdev->dev,
					"TX DMA page map failed\n");
				adapter->tx_dma_failed++;
				return -1;
			}

			buffer_info->next_to_watch = i;

			len -= size;
			offset += size;
			count++;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}
	}

	if (i == 0)
		i = tx_ring->count - 1;
	else
		i--;

	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;
}
static void e1000_tx_queue(struct e1000_adapter *adapter,
			   int tx_flags, int count)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc = NULL;
	struct e1000_buffer *buffer_info;
	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
	unsigned int i;

	if (tx_flags & E1000_TX_FLAGS_TSO) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
			     E1000_TXD_CMD_TSE;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;

		if (tx_flags & E1000_TX_FLAGS_IPV4)
			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
	}

	if (tx_flags & E1000_TX_FLAGS_CSUM) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
	}

	if (tx_flags & E1000_TX_FLAGS_VLAN) {
		txd_lower |= E1000_TXD_CMD_VLE;
		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
	}

	i = tx_ring->next_to_use;

	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->lower.data =
			cpu_to_le32(txd_lower | buffer_info->length);
		tx_desc->upper.data = cpu_to_le32(txd_upper);

		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
	/*
	 * we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems
	 */
	mmiowb();
}
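
/*
 * Illustrative note (added commentary, not from the original source):
 * the "<< 8" shifts above exist because the packet-option bits (POPTS)
 * occupy bits 15:8 of the descriptor's upper dword; IXSM requests IP
 * header checksum insertion and TXSM requests TCP/UDP checksum
 * insertion for every data descriptor of the packet.
 */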
#define MINIMUM_DHCP_PACKET_SIZE 282
static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
				    struct sk_buff *skb)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 length, offset;

	if (vlan_tx_tag_present(skb)) {
		if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id)
		    && (adapter->hw.mng_cookie.status &
			E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
			return 0;
	}

	if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
		return 0;

	if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP))
		return 0;

	{
		const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14);
		struct udphdr *udp;

		if (ip->protocol != IPPROTO_UDP)
			return 0;

		udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
		if (ntohs(udp->dest) != 67)
			return 0;

		offset = (u8 *)udp + 8 - skb->data;
		length = skb->len - offset;
		return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
	}

	return 0;
}
static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_stop_queue(netdev);
	/*
	 * Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/*
	 * We need to check again in a case another CPU has just
	 * made room available.
	 */
	if (e1000_desc_unused(adapter->tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_start_queue(netdev);
	++adapter->restart_queue;
	return 0;
}

static int e1000_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (e1000_desc_unused(adapter->tx_ring) >= size)
		return 0;
	return __e1000_maybe_stop_tx(netdev, size);
}
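
/*
 * Illustrative note (added commentary, not from the original source):
 * the smp_mb() in __e1000_maybe_stop_tx() pairs with the Tx cleanup
 * path that frees descriptors and restarts the queue.  Without the
 * barrier and the re-check, this interleaving could strand the queue
 * forever:
 *
 *	CPU A (xmit)			CPU B (tx cleanup)
 *	sees ring full
 *					frees descriptors
 *					sees queue still running,
 *					does not wake it
 *	stops queue
 */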
#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int first;
	unsigned int max_per_txd = E1000_MAX_PER_TXD;
	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
	unsigned int tx_flags = 0;
	unsigned int len = skb->len - skb->data_len;
	unsigned long irq_flags;
	unsigned int nr_frags;
	unsigned int mss;
	int count = 0;
	int tso;
	unsigned int f;

	if (test_bit(__E1000_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	mss = skb_shinfo(skb)->gso_size;
	/*
	 * The controller does a simple calculation to
	 * make sure there is enough room in the FIFO before
	 * initiating the DMA for each buffer.  The calc is:
	 * 4 = ceil(buffer len/mss).  To make sure we don't
	 * overrun the FIFO, adjust the max buffer len if mss
	 * drops.
	 */
	if (mss) {
		u8 hdr_len;
		max_per_txd = min(mss << 2, max_per_txd);
		max_txd_pwr = fls(max_per_txd) - 1;

		/*
		 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
		 * points to just header, pull a few bytes of payload from
		 * frags into skb->data
		 */
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		/*
		 * we do this workaround for ES2LAN, but it is un-necessary,
		 * avoiding it could save a lot of cycles
		 */
		if (skb->data_len && (hdr_len == len)) {
			unsigned int pull_size;

			pull_size = min((unsigned int)4, skb->data_len);
			if (!__pskb_pull_tail(skb, pull_size)) {
				e_err("__pskb_pull_tail failed.\n");
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
			len = skb->len - skb->data_len;
		}
	}

	/* reserve a descriptor for the offload context */
	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
		count++;
	count++;

	count += TXD_USE_COUNT(len, max_txd_pwr);

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
				       max_txd_pwr);

	if (adapter->hw.mac.tx_pkt_filtering)
		e1000_transfer_dhcp_info(adapter, skb);

	if (!spin_trylock_irqsave(&adapter->tx_queue_lock, irq_flags))
		/* Collision - tell upper layer to requeue */
		return NETDEV_TX_LOCKED;

	/*
	 * need: count + 2 desc gap to keep tail from touching
	 * head, otherwise try next time
	 */
	if (e1000_maybe_stop_tx(netdev, count + 2)) {
		spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
		return NETDEV_TX_BUSY;
	}

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= E1000_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
	}

	first = tx_ring->next_to_use;

	tso = e1000_tso(adapter, skb);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= E1000_TX_FLAGS_TSO;
	else if (e1000_tx_csum(adapter, skb))
		tx_flags |= E1000_TX_FLAGS_CSUM;

	/*
	 * Old method was to assume IPv4 packet by default if TSO was enabled.
	 * 82571 hardware supports TSO capabilities for IPv6 as well...
	 * no longer assume, we must.
	 */
	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= E1000_TX_FLAGS_IPV4;

	count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
	if (count < 0) {
		/* handle pci_map_single() error in e1000_tx_map */
		dev_kfree_skb_any(skb);
		spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
		return NETDEV_TX_OK;
	}

	e1000_tx_queue(adapter, tx_flags, count);

	netdev->trans_start = jiffies;

	/* Make sure there is space in the ring for the next send. */
	e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);

	spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
	return NETDEV_TX_OK;
}
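
/*
 * Worked example (added commentary, not from the original source):
 * with E1000_MAX_TXD_PWR = 12 each descriptor maps at most 4096 bytes,
 * so a 9018-byte linear buffer costs
 *
 *	TXD_USE_COUNT(9018, 12) = (9018 >> 12) + 1 = 3 descriptors
 *
 * and the "count + 2" passed to e1000_maybe_stop_tx() above keeps a
 * two-slot gap so the tail pointer never catches up with the head.
 */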
/**
 * e1000_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void e1000_tx_timeout(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}

static void e1000_reset_task(struct work_struct *work)
{
	struct e1000_adapter *adapter;
	adapter = container_of(work, struct e1000_adapter, reset_task);

	e1000e_reinit_locked(adapter);
}
/**
 * e1000_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* only return the current stats */
	return &adapter->net_stats;
}
/**
 * e1000_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		e_err("Invalid MTU setting\n");
		return -EINVAL;
	}

	/* Jumbo frame size limits */
	if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
		if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
			e_err("Jumbo Frames not supported.\n");
			return -EINVAL;
		}
		if (adapter->hw.phy.type == e1000_phy_ife) {
			e_err("Jumbo Frames not supported.\n");
			return -EINVAL;
		}
	}

#define MAX_STD_JUMBO_FRAME_SIZE 9234
	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		e_err("MTU > 9216 not supported.\n");
		return -EINVAL;
	}

	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
		msleep(1);
	/* e1000e_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;
	if (netif_running(netdev))
		e1000e_down(adapter);

	/*
	 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 * However with the new *_jumbo_rx* routines, jumbo receives will use
	 * frag sized skbs
	 */

	if (max_frame <= 256)
		adapter->rx_buffer_len = 256;
	else if (max_frame <= 512)
		adapter->rx_buffer_len = 512;
	else if (max_frame <= 1024)
		adapter->rx_buffer_len = 1024;
	else if (max_frame <= 2048)
		adapter->rx_buffer_len = 2048;
	else
		adapter->rx_buffer_len = 4096;

	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
	    (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
		adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
					 + ETH_FCS_LEN;

	e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		e1000e_up(adapter);
	else
		e1000e_reset(adapter);

	clear_bit(__E1000_RESETTING, &adapter->state);

	return 0;
}
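
/*
 * Worked example (added commentary, not from the original source):
 * changing the MTU to 9000 gives max_frame = 9000 + 14 + 4 = 9018,
 * which passes the MAX_STD_JUMBO_FRAME_SIZE check, selects the
 * 4096-byte receive buffer bucket above, and (per e1000_configure_rx)
 * switches the ring over to the *_jumbo_rx* routines that chain
 * page-sized buffers.
 */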
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		switch (data->reg_num & 0x1F) {
		case MII_BMCR:
			data->val_out = adapter->phy_regs.bmcr;
			break;
		case MII_BMSR:
			data->val_out = adapter->phy_regs.bmsr;
			break;
		case MII_PHYSID1:
			data->val_out = (adapter->hw.phy.id >> 16);
			break;
		case MII_PHYSID2:
			data->val_out = (adapter->hw.phy.id & 0xFFFF);
			break;
		case MII_ADVERTISE:
			data->val_out = adapter->phy_regs.advertise;
			break;
		case MII_LPA:
			data->val_out = adapter->phy_regs.lpa;
			break;
		case MII_EXPANSION:
			data->val_out = adapter->phy_regs.expansion;
			break;
		case MII_CTRL1000:
			data->val_out = adapter->phy_regs.ctrl1000;
			break;
		case MII_STAT1000:
			data->val_out = adapter->phy_regs.stat1000;
			break;
		case MII_ESTATUS:
			data->val_out = adapter->phy_regs.estatus;
			break;
		default:
			return -EIO;
		}
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return e1000_mii_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status;
	u32 wufc = adapter->wol;
	int retval = 0;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
		e1000e_down(adapter);
		e1000_free_irq(adapter);
	}

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	status = er32(STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000_set_multi(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = er32(RCTL);
			rctl |= E1000_RCTL_MPE;
			ew32(RCTL, rctl);
		}

		ctrl = er32(CTRL);
		/* advertise wake from D3Cold */
		#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC |
			E1000_CTRL_EN_PHY_PWR_MGMT;
		ew32(CTRL, ctrl);

		if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
		    adapter->hw.phy.media_type ==
		    e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		if (adapter->flags & FLAG_IS_ICH)
			e1000e_disable_gig_wol_ich8lan(&adapter->hw);

		/* Allow time for pending master requests to run */
		e1000e_disable_pcie_master(&adapter->hw);

		ew32(WUC, E1000_WUC_PME_EN);
		ew32(WUFC, wufc);
		pci_enable_wake(pdev, PCI_D3hot, 1);
		pci_enable_wake(pdev, PCI_D3cold, 1);
	} else {
		ew32(WUC, 0);
		ew32(WUFC, 0);
		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);
	}

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->flags & FLAG_MNG_PT_ENABLED) {
		pci_enable_wake(pdev, PCI_D3hot, 1);
		pci_enable_wake(pdev, PCI_D3cold, 1);
	}

	if (adapter->hw.phy.type == e1000_phy_igp_3)
		e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);

	/*
	 * Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	e1000_release_hw_control(adapter);

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
static void e1000e_disable_l1aspm(struct pci_dev *pdev)
{
	int pos;
	u16 val;

	/*
	 * 82573 workaround - disable L1 ASPM on mobile chipsets
	 *
	 * L1 ASPM on various mobile (ich7) chipsets do not behave properly
	 * resulting in lost data or garbage information on the pci-e link
	 * level. This could result in (false) bad EEPROM checksum errors,
	 * long ping times (up to 2s) or even a system freeze/hang.
	 *
	 * Unfortunately this feature saves about 1W power consumption when
	 * active.
	 */
	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &val);
	if (val & 0x2) {
		dev_warn(&pdev->dev, "Disabling L1 ASPM\n");
		val &= ~0x2;
		pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, val);
	}
}
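
/*
 * Illustrative note (added commentary, not from the original source):
 * in the PCIe Link Control register the two low bits select ASPM
 * support (bit 0 = L0s, bit 1 = L1), so the "& 0x2" test and
 * "&= ~0x2" clear above disable only L1 while leaving the L0s policy
 * untouched.
 */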
static int e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	e1000e_disable_l1aspm(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"Cannot enable PCI device from suspend\n");
		return err;
	}

	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(netdev)) {
		err = e1000_request_irq(adapter);
		if (err)
			return err;
	}

	e1000e_power_up_phy(adapter);
	e1000e_reset(adapter);
	ew32(WUS, ~0);

	e1000_init_manageability(adapter);

	if (netif_running(netdev))
		e1000e_up(adapter);

	netif_device_attach(netdev);

	/*
	 * If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up.  For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000_get_hw_control(adapter);

	return 0;
}

static void e1000_shutdown(struct pci_dev *pdev)
{
	e1000_suspend(pdev, PMSG_SUSPEND);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	e1000_intr(adapter->pdev->irq, netdev);

	enable_irq(adapter->pdev->irq);
}
#endif
/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev))
		e1000e_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the e1000_resume routine.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	e1000e_disable_l1aspm(pdev);
	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_restore_state(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	e1000e_reset(adapter);
	ew32(WUS, ~0);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation. Implementation resembles the
 * second-half of the e1000_resume routine.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability(adapter);

	if (netif_running(netdev)) {
		if (e1000e_up(adapter)) {
			dev_err(&pdev->dev,
				"can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/*
	 * If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up.  For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000_get_hw_control(adapter);
}
static void e1000_print_device_info(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 pba_num;

	/* print bus type/speed/width info */
	e_info("(PCI Express:2.5GB/s:%s) %02x:%02x:%02x:%02x:%02x:%02x\n",
	       /* bus width */
	       ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
	        "Width x1"),
	       /* MAC address */
	       netdev->dev_addr[0], netdev->dev_addr[1],
	       netdev->dev_addr[2], netdev->dev_addr[3],
	       netdev->dev_addr[4], netdev->dev_addr[5]);
	e_info("Intel(R) PRO/%s Network Connection\n",
	       (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
	e1000e_read_pba_num(hw, &pba_num);
	e_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
	       hw->mac.type, hw->phy.type, (pba_num >> 8), (pba_num & 0xff));
}
static void e1000_eeprom_checks(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int ret_val;
	u16 buf = 0;

	if (hw->mac.type != e1000_82573)
		return;

	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
	if (!(le16_to_cpu(buf) & (1 << 0))) {
		/* Deep Smart Power Down (DSPD) */
		e_warn("Warning: detected DSPD enabled in EEPROM\n");
	}

	ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf);
	if (le16_to_cpu(buf) & (3 << 2)) {
		/* ASPM enable */
		e_warn("Warning: detected ASPM enabled in EEPROM\n");
	}
}

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 */
static int __devinit e1000_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;
	const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
	resource_size_t mmio_start, mmio_len;
	resource_size_t flash_start, flash_len;

	static int cards_found;
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;

	e1000e_disable_l1aspm(pdev);
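
	/*
	 * After enabling the device, pick a DMA mask: prefer 64-bit
	 * addressing (recording the choice in pci_using_dac so that
	 * NETIF_F_HIGHDMA can be advertised later), fall back to 32-bit,
	 * and abort the probe only if neither mask is usable.
	 */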
	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (!err)
			pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_32BIT_MASK);
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_selected_regions(pdev,
					   pci_select_bars(pdev, IORESOURCE_MEM),
					   e1000e_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);
	pci_save_state(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	hw = &adapter->hw;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->ei = ei;
	adapter->pba = ei->pba;
	adapter->flags = ei->flags;
	adapter->hw.adapter = adapter;
	adapter->hw.mac.type = ei->mac;
	adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if (!adapter->hw.hw_addr)
		goto err_ioremap;

	if ((adapter->flags & FLAG_HAS_FLASH) &&
	    (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		flash_start = pci_resource_start(pdev, 1);
		flash_len = pci_resource_len(pdev, 1);
		adapter->hw.flash_address = ioremap(flash_start, flash_len);
		if (!adapter->hw.flash_address)
			goto err_flashmap;
	}

	/* construct the net_device struct */
	netdev->open			= &e1000_open;
	netdev->stop			= &e1000_close;
	netdev->hard_start_xmit		= &e1000_xmit_frame;
	netdev->get_stats		= &e1000_get_stats;
	netdev->set_multicast_list	= &e1000_set_multi;
	netdev->set_mac_address		= &e1000_set_mac;
	netdev->change_mtu		= &e1000_change_mtu;
	netdev->do_ioctl		= &e1000_ioctl;
	e1000e_set_ethtool_ops(netdev);
	netdev->tx_timeout		= &e1000_tx_timeout;
	netdev->watchdog_timeo		= 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
	netdev->vlan_rx_register	= e1000_vlan_rx_register;
	netdev->vlan_rx_add_vid		= e1000_vlan_rx_add_vid;
	netdev->vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid;
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller		= e1000_netpoll;
#endif
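
	/*
	 * Kernels of this vintage hang the entry points directly off
	 * struct net_device; later kernels replaced these per-field
	 * assignments with a const struct net_device_ops.
	 */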
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	adapter->bd_number = cards_found++;

	/* setup adapter struct */
	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	err = -EIO;

	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));

	err = ei->get_variants(adapter);
	if (err)
		goto err_hw_init;

	hw->mac.ops.get_bus_info(&adapter->hw);

	adapter->hw.phy.autoneg_wait_to_complete = 0;

	/* Copper options */
	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
		adapter->hw.phy.mdix = AUTO_ALL_MODES;
		adapter->hw.phy.disable_polarity_correction = 0;
		adapter->hw.phy.ms_type = e1000_ms_hw_default;
	}

	if (e1000_check_reset_block(&adapter->hw))
		e_info("PHY reset is blocked due to SOL/IDER session.\n");

	netdev->features = NETIF_F_SG |
			   NETIF_F_HW_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX;

	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
		netdev->features |= NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_HW_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	/*
	 * We should not be using LLTX anymore, but we are still Tx faster
	 * with it.
	 */
	netdev->features |= NETIF_F_LLTX;

	if (e1000e_enable_mng_pass_thru(&adapter->hw))
		adapter->flags |= FLAG_MNG_PT_ENABLED;

	/*
	 * before reading the NVM, reset the controller to
	 * put the device in a known good starting state
	 */
	adapter->hw.mac.ops.reset_hw(&adapter->hw);

	/*
	 * systems with ASPM and others may see the checksum fail on the first
	 * attempt. Let's give it a few tries
	 */
	for (i = 0;; i++) {
		if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
			break;
		if (i == 2) {
			e_err("The NVM Checksum Is Not Valid\n");
			err = -EIO;
			goto err_eeprom;
		}
	}

	e1000_eeprom_checks(adapter);

	/* copy the MAC address out of the NVM */
	if (e1000e_read_mac_addr(&adapter->hw))
		e_err("NVM Read Error while reading MAC address\n");

	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		e_err("Invalid MAC Address: %02x:%02x:%02x:%02x:%02x:%02x\n",
		      netdev->perm_addr[0], netdev->perm_addr[1],
		      netdev->perm_addr[2], netdev->perm_addr[3],
		      netdev->perm_addr[4], netdev->perm_addr[5]);
		err = -EIO;
		goto err_eeprom;
	}

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &e1000_watchdog;
	adapter->watchdog_timer.data = (unsigned long) adapter;

	init_timer(&adapter->phy_info_timer);
	adapter->phy_info_timer.function = &e1000_update_phy_info;
	adapter->phy_info_timer.data = (unsigned long) adapter;

	INIT_WORK(&adapter->reset_task, e1000_reset_task);
	INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);

	e1000e_check_options(adapter);

	/* Initialize link parameters. User can change them with ethtool */
	adapter->hw.mac.autoneg = 1;
	adapter->fc_autoneg = 1;
	adapter->hw.fc.original_type = e1000_fc_default;
	adapter->hw.fc.type = e1000_fc_default;
	adapter->hw.phy.autoneg_advertised = 0x2f;
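	/*
	 * 0x2f = ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF |
	 * ADVERTISE_100_FULL | ADVERTISE_1000_FULL, i.e. everything except
	 * half-duplex gigabit.
	 */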

	/* ring size defaults */
	adapter->rx_ring->count = 256;
	adapter->tx_ring->count = 256;

	/*
	 * Initial Wake on LAN setting - If APM wake is enabled in
	 * the EEPROM, enable the ACPI Magic Packet filter
	 */
	if (adapter->flags & FLAG_APME_IN_WUC) {
		/* APME bit in EEPROM is mapped to WUC.APME */
		eeprom_data = er32(WUC);
		eeprom_apme_mask = E1000_WUC_APME;
	} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
		if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
		    (adapter->hw.bus.func == 1))
			e1000_read_nvm(&adapter->hw,
				NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
		else
			e1000_read_nvm(&adapter->hw,
				NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
	}

	/* fetch WoL from EEPROM */
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;
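	/*
	 * Only the Magic Packet filter (E1000_WUFC_MAG) is seeded from the
	 * EEPROM; any other wake filters are left for the user to configure
	 * via ethtool.
	 */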

	/*
	 * now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port
	 */
	if (!(adapter->flags & FLAG_HAS_WOL))
		adapter->eeprom_wol = 0;

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;

	/* reset the hardware with the new settings */
	e1000e_reset(adapter);

	/*
	 * If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up. For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000_get_hw_control(adapter);

	/* tell the stack to leave us alone until e1000_open() is called */
	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	e1000_print_device_info(adapter);

	return 0;

err_register:
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000_release_hw_control(adapter);
err_eeprom:
	if (!e1000_check_reset_block(&adapter->hw))
		e1000_phy_hw_reset(&adapter->hw);
err_hw_init:
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_sw_init:
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
err_flashmap:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 */
static void __devexit e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/*
	 * flush_scheduled_work() may reschedule our watchdog task, so
	 * explicitly disable watchdog tasks from being rescheduled
	 */
	set_bit(__E1000_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	flush_scheduled_work();

	/*
	 * Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	e1000_release_hw_control(adapter);

	unregister_netdev(netdev);

	if (!e1000_check_reset_block(&adapter->hw))
		e1000_phy_hw_reset(&adapter->hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(adapter->hw.hw_addr);
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/* PCI Error Recovery (ERS) */
static struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};

static struct pci_device_id e1000_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
	  board_80003es2lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },

	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
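
/*
 * Exporting the ID table with MODULE_DEVICE_TABLE lets depmod generate
 * modalias entries, so udev can autoload e1000e when a matching PCI
 * vendor/device ID appears.
 */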

/* PCI Device API Driver */
static struct pci_driver e1000_driver = {
	.name     = e1000e_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = __devexit_p(e1000_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = e1000_suspend,
	.resume   = e1000_resume,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init e1000_init_module(void)
{
	int ret;

	printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n",
	       e1000e_driver_name, e1000e_driver_version);
	printk(KERN_INFO "%s: Copyright (c) 1999-2008 Intel Corporation.\n",
	       e1000e_driver_name);
	ret = pci_register_driver(&e1000_driver);
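	/*
	 * Register a default (i.e. no-op) CPU DMA latency requirement with
	 * PM QoS up front; other paths in the driver may update it at
	 * runtime to keep CPUs out of deep sleep states while
	 * latency-sensitive receive features are in use.
	 */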
	pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name,
			       PM_QOS_DEFAULT_VALUE);

	return ret;
}
module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
	pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name);
}
module_exit(e1000_exit_module);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);