Revert "e1000: fix shared interrupt warning message"
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / drivers / net / e1000 / e1000_main.c
blob98215fdd7d10bc23a6b50c59c720d901a65f0636
/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "e1000.h"
#include <net/ip6_checksum.h>

char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#ifndef CONFIG_E1000_NAPI
#define DRIVERNAPI
#else
#define DRIVERNAPI "-NAPI"
#endif
#define DRV_VERSION "7.3.20-k2"DRIVERNAPI
char e1000_driver_version[] = DRV_VERSION;
static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";

/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static struct pci_device_id e1000_pci_tbl[] = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1049),
	INTEL_E1000_ETHERNET_DEVICE(0x104A),
	INTEL_E1000_ETHERNET_DEVICE(0x104B),
	INTEL_E1000_ETHERNET_DEVICE(0x104C),
	INTEL_E1000_ETHERNET_DEVICE(0x104D),
	INTEL_E1000_ETHERNET_DEVICE(0x105E),
	INTEL_E1000_ETHERNET_DEVICE(0x105F),
	INTEL_E1000_ETHERNET_DEVICE(0x1060),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x107D),
	INTEL_E1000_ETHERNET_DEVICE(0x107E),
	INTEL_E1000_ETHERNET_DEVICE(0x107F),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x108B),
	INTEL_E1000_ETHERNET_DEVICE(0x108C),
	INTEL_E1000_ETHERNET_DEVICE(0x1096),
	INTEL_E1000_ETHERNET_DEVICE(0x1098),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x109A),
	INTEL_E1000_ETHERNET_DEVICE(0x10A4),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	INTEL_E1000_ETHERNET_DEVICE(0x10B9),
	INTEL_E1000_ETHERNET_DEVICE(0x10BA),
	INTEL_E1000_ETHERNET_DEVICE(0x10BB),
	INTEL_E1000_ETHERNET_DEVICE(0x10BC),
	INTEL_E1000_ETHERNET_DEVICE(0x10C4),
	INTEL_E1000_ETHERNET_DEVICE(0x10C5),
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);

int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);

static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring);
static void e1000_set_multi(struct net_device *netdev);
static void e1000_update_phy_info(unsigned long data);
static void e1000_watchdog(unsigned long data);
static void e1000_82547_tx_fifo_stall(unsigned long data);
static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
#ifdef CONFIG_PCI_MSI
static irqreturn_t e1000_intr_msi(int irq, void *data);
#endif
static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring);
#ifdef CONFIG_E1000_NAPI
static int e1000_clean(struct net_device *poll_dev, int *budget);
static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring,
				    int *work_done, int work_to_do);
static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
				       struct e1000_rx_ring *rx_ring,
				       int *work_done, int work_to_do);
#else
static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring);
static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
				       struct e1000_rx_ring *rx_ring);
#endif
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count);
static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
				      struct e1000_rx_ring *rx_ring,
				      int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd);
void e1000_set_ethtool_ops(struct net_device *netdev);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb);

static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
#ifdef CONFIG_PM
static int e1000_resume(struct pci_dev *pdev);
#endif
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll (struct net_device *netdev);
#endif

extern void e1000_check_options(struct e1000_adapter *adapter);

#define COPYBREAK_DEFAULT 256
static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

static struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};

static struct pci_driver e1000_driver = {
	.name     = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = __devexit_p(e1000_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = e1000_suspend,
	.resume   = e1000_resume,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/

static int __init
e1000_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       e1000_driver_string, e1000_driver_version);

	printk(KERN_INFO "%s\n", e1000_copyright);

	ret = pci_register_driver(&e1000_driver);
	if (copybreak != COPYBREAK_DEFAULT) {
		if (copybreak == 0)
			printk(KERN_INFO "e1000: copybreak disabled\n");
		else
			printk(KERN_INFO "e1000: copybreak enabled for "
			       "packets <= %u bytes\n", copybreak);
	}
	return ret;
}

module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/

static void __exit
e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);

static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int flags, err = 0;

	flags = IRQF_SHARED;
#ifdef CONFIG_PCI_MSI
	if (adapter->hw.mac_type >= e1000_82571) {
		adapter->have_msi = TRUE;
		if ((err = pci_enable_msi(adapter->pdev))) {
			DPRINTK(PROBE, ERR,
				"Unable to allocate MSI interrupt Error: %d\n", err);
			adapter->have_msi = FALSE;
		}
	}
	if (adapter->have_msi) {
		flags &= ~IRQF_SHARED;
		err = request_irq(adapter->pdev->irq, &e1000_intr_msi, flags,
				  netdev->name, netdev);
		if (err)
			DPRINTK(PROBE, ERR,
				"Unable to allocate interrupt Error: %d\n", err);
	} else
#endif
	if ((err = request_irq(adapter->pdev->irq, &e1000_intr, flags,
			       netdev->name, netdev)))
		DPRINTK(PROBE, ERR,
			"Unable to allocate interrupt Error: %d\n", err);

	return err;
}
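
/* Note on the flow above: MSI is attempted only on 82571 and newer
 * MACs.  If pci_enable_msi() succeeds, the vector is exclusive to this
 * device, so IRQF_SHARED is cleared before request_irq(); on any
 * failure the code falls back to the legacy shared INTx handler
 * e1000_intr().
 */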

static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);

#ifdef CONFIG_PCI_MSI
	if (adapter->have_msi)
		pci_disable_msi(adapter->pdev);
#endif
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/

static void
e1000_irq_disable(struct e1000_adapter *adapter)
{
	atomic_inc(&adapter->irq_sem);
	E1000_WRITE_REG(&adapter->hw, IMC, ~0);
	E1000_WRITE_FLUSH(&adapter->hw);
	synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/

static void
e1000_irq_enable(struct e1000_adapter *adapter)
{
	if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
		E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
		E1000_WRITE_FLUSH(&adapter->hw);
	}
}
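
/* irq_sem acts as a disable-count: e1000_irq_disable() increments it
 * and masks all interrupt causes (IMC = ~0), while e1000_irq_enable()
 * only rewrites IMS once the count drops back to zero, so nested
 * disable/enable pairs behave like a reference-counted mask.
 */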

static void
e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	uint16_t vid = adapter->hw.mng_cookie.vlan_id;
	uint16_t old_vid = adapter->mng_vlan_id;
	if (adapter->vlgrp) {
		if (!adapter->vlgrp->vlan_devices[vid]) {
			if (adapter->hw.mng_cookie.status &
			    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
				e1000_vlan_rx_add_vid(netdev, vid);
				adapter->mng_vlan_id = vid;
			} else
				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;

			if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
			    (vid != old_vid) &&
			    !adapter->vlgrp->vlan_devices[old_vid])
				e1000_vlan_rx_kill_vid(netdev, old_vid);
		} else
			adapter->mng_vlan_id = vid;
	}
}

/**
 * e1000_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is closed.
 *
 **/

static void
e1000_release_hw_control(struct e1000_adapter *adapter)
{
	uint32_t ctrl_ext;
	uint32_t swsm;
	uint32_t extcnf;

	/* Let firmware take over control of h/w */
	switch (adapter->hw.mac_type) {
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
				ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
		break;
	case e1000_82573:
		swsm = E1000_READ_REG(&adapter->hw, SWSM);
		E1000_WRITE_REG(&adapter->hw, SWSM,
				swsm & ~E1000_SWSM_DRV_LOAD);
	case e1000_ich8lan:
		extcnf = E1000_READ_REG(&adapter->hw, CTRL_EXT);
		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
				extcnf & ~E1000_CTRL_EXT_DRV_LOAD);
		break;
	default:
		break;
	}
}

/**
 * e1000_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is open.
 *
 **/

static void
e1000_get_hw_control(struct e1000_adapter *adapter)
{
	uint32_t ctrl_ext;
	uint32_t swsm;
	uint32_t extcnf;

	/* Let firmware know the driver has taken over */
	switch (adapter->hw.mac_type) {
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
				ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
		break;
	case e1000_82573:
		swsm = E1000_READ_REG(&adapter->hw, SWSM);
		E1000_WRITE_REG(&adapter->hw, SWSM,
				swsm | E1000_SWSM_DRV_LOAD);
		break;
	case e1000_ich8lan:
		extcnf = E1000_READ_REG(&adapter->hw, EXTCNF_CTRL);
		E1000_WRITE_REG(&adapter->hw, EXTCNF_CTRL,
				extcnf | E1000_EXTCNF_CTRL_SWFLAG);
		break;
	default:
		break;
	}
}

static void
e1000_init_manageability(struct e1000_adapter *adapter)
{
	if (adapter->en_mng_pt) {
		uint32_t manc = E1000_READ_REG(&adapter->hw, MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		/* enable receiving management packets to the host */
		/* this will probably generate destination unreachable messages
		 * from the host OS, but the packets will be handled on SMBUS */
		if (adapter->hw.has_manc2h) {
			uint32_t manc2h = E1000_READ_REG(&adapter->hw, MANC2H);

			manc |= E1000_MANC_EN_MNG2HOST;
#define E1000_MNG2HOST_PORT_623 (1 << 5)
#define E1000_MNG2HOST_PORT_664 (1 << 6)
			manc2h |= E1000_MNG2HOST_PORT_623;
			manc2h |= E1000_MNG2HOST_PORT_664;
			E1000_WRITE_REG(&adapter->hw, MANC2H, manc2h);
		}

		E1000_WRITE_REG(&adapter->hw, MANC, manc);
	}
}
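
/* The two MANC2H bits set above correspond, per their macro names, to
 * UDP ports 623 and 664 - the RMCP and secure-RMCP ports used by
 * ASF/IPMI-style management traffic (an interpretation based on the
 * bit names, not stated in this file).
 */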

static void
e1000_release_manageability(struct e1000_adapter *adapter)
{
	if (adapter->en_mng_pt) {
		uint32_t manc = E1000_READ_REG(&adapter->hw, MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;

		if (adapter->hw.has_manc2h)
			manc &= ~E1000_MANC_EN_MNG2HOST;

		/* don't explicitly have to mess with MANC2H since
		 * MANC has an enable disable that gates MANC2H */

		E1000_WRITE_REG(&adapter->hw, MANC, manc);
	}
}

int
e1000_up(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	/* hardware has been reset, we need to reload some things */

	e1000_set_multi(netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	/* call E1000_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
		adapter->alloc_rx_buf(adapter, ring,
				      E1000_DESC_UNUSED(ring));
	}

	adapter->tx_queue_len = netdev->tx_queue_len;

#ifdef CONFIG_E1000_NAPI
	netif_poll_enable(netdev);
#endif
	e1000_irq_enable(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);

	/* fire a link change interrupt to start the watchdog */
	E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_LSC);
	return 0;
}

/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000_reset ***
 *
 **/

void e1000_power_up_phy(struct e1000_adapter *adapter)
{
	uint16_t mii_reg = 0;

	/* Just clear the power down bit to wake the phy back up */
	if (adapter->hw.media_type == e1000_media_type_copper) {
		/* according to the manual, the phy will retain its
		 * settings across a power-down/up cycle */
		e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
		mii_reg &= ~MII_CR_POWER_DOWN;
		e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
	}
}

static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	/* Power down the PHY so no link is implied when interface is down *
	 * The PHY cannot be powered down if any of the following is TRUE *
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active */
	if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
	    adapter->hw.media_type == e1000_media_type_copper) {
		uint16_t mii_reg = 0;

		switch (adapter->hw.mac_type) {
		case e1000_82540:
		case e1000_82545:
		case e1000_82545_rev_3:
		case e1000_82546:
		case e1000_82546_rev_3:
		case e1000_82541:
		case e1000_82541_rev_2:
		case e1000_82547:
		case e1000_82547_rev_2:
			if (E1000_READ_REG(&adapter->hw, MANC) &
			    E1000_MANC_SMBUS_EN)
				goto out;
			break;
		case e1000_82571:
		case e1000_82572:
		case e1000_82573:
		case e1000_80003es2lan:
		case e1000_ich8lan:
			if (e1000_check_mng_mode(&adapter->hw) ||
			    e1000_check_phy_reset_block(&adapter->hw))
				goto out;
			break;
		default:
			goto out;
		}
		e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
		mii_reg |= MII_CR_POWER_DOWN;
		e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
		mdelay(1);
	}
out:
	return;
}

void
e1000_down(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__E1000_DOWN, &adapter->flags);

	e1000_irq_disable(adapter);

	del_timer_sync(&adapter->tx_fifo_stall_timer);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

#ifdef CONFIG_E1000_NAPI
	netif_poll_disable(netdev);
#endif
	netdev->tx_queue_len = adapter->tx_queue_len;
	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}

void
e1000_reinit_locked(struct e1000_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	e1000_down(adapter);
	e1000_up(adapter);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}

void
e1000_reset(struct e1000_adapter *adapter)
{
	uint32_t pba = 0, tx_space, min_tx_space, min_rx_space;
	uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
	boolean_t legacy_pba_adjust = FALSE;

	/* Repartition PBA for MTUs greater than 9K.
	 * To take effect, CTRL.RST is required.
	 */

	switch (adapter->hw.mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
	case e1000_82544:
	case e1000_82540:
	case e1000_82541:
	case e1000_82541_rev_2:
		legacy_pba_adjust = TRUE;
		pba = E1000_PBA_48K;
		break;
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_82546_rev_3:
		pba = E1000_PBA_48K;
		break;
	case e1000_82547:
	case e1000_82547_rev_2:
		legacy_pba_adjust = TRUE;
		pba = E1000_PBA_30K;
		break;
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		pba = E1000_PBA_38K;
		break;
	case e1000_82573:
		pba = E1000_PBA_20K;
		break;
	case e1000_ich8lan:
		pba = E1000_PBA_8K;
	case e1000_undefined:
	case e1000_num_macs:
		break;
	}

	if (legacy_pba_adjust == TRUE) {
		if (adapter->netdev->mtu > E1000_RXBUFFER_8192)
			pba -= 8; /* allocate more FIFO for Tx */

		if (adapter->hw.mac_type == e1000_82547) {
			adapter->tx_fifo_head = 0;
			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
			adapter->tx_fifo_size =
				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
			atomic_set(&adapter->tx_fifo_stall, 0);
		}
	} else if (adapter->hw.max_frame_size > MAXIMUM_ETHERNET_FRAME_SIZE) {
		/* adjust PBA for jumbo frames */
		E1000_WRITE_REG(&adapter->hw, PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB. Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = E1000_READ_REG(&adapter->hw, PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* don't include ethernet FCS because hardware appends/strips */
		min_rx_space = adapter->netdev->mtu + ENET_HEADER_SIZE +
			       VLAN_TAG_SIZE;
		min_tx_space = min_rx_space;
		min_tx_space *= 2;
		E1000_ROUNDUP(min_tx_space, 1024);
		min_tx_space >>= 10;
		E1000_ROUNDUP(min_rx_space, 1024);
		min_rx_space >>= 10;
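
		/* Worked example (illustrative, assuming the conventional
		 * 14-byte Ethernet header and 4-byte VLAN tag): with a
		 * 9000-byte MTU, min_rx_space = 9000 + 14 + 4 = 9018 bytes,
		 * rounded up to 9216 and shifted down to 9 KB, while
		 * min_tx_space = 2 * 9018 = 18036, rounded up to 18432 and
		 * shifted down to 18 KB. */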

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* PCI/PCIx hardware has PBA alignment constraints */
			switch (adapter->hw.mac_type) {
			case e1000_82545 ... e1000_82546_rev_3:
				pba &= ~(E1000_PBA_8K - 1);
				break;
			default:
				break;
			}
		}
		/* if short on rx space, rx wins and must trump tx
		 * adjustment or use Early Receive if available */
		if (pba < min_rx_space) {
			switch (adapter->hw.mac_type) {
			case e1000_82573:
				/* ERT enabled in e1000_configure_rx */
				break;
			default:
				pba = min_rx_space;
				break;
			}
		}
	}

	E1000_WRITE_REG(&adapter->hw, PBA, pba);

	/* flow control settings */
	/* Set the FC high water mark to 90% of the FIFO size; the
	 * value must have its last 3 bits clear */
	fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8;
	/* We can't use 90% on small FIFOs because the remainder
	 * would be less than 1 full frame. In this case, we size
	 * it to allow at least a full frame above the high water
	 * mark. */
	if (pba < E1000_PBA_16K)
		fc_high_water_mark = (pba * 1024) - 1600;

	adapter->hw.fc_high_water = fc_high_water_mark;
	adapter->hw.fc_low_water = fc_high_water_mark - 8;
	if (adapter->hw.mac_type == e1000_80003es2lan)
		adapter->hw.fc_pause_time = 0xFFFF;
	else
		adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
	adapter->hw.fc_send_xon = 1;
	adapter->hw.fc = adapter->hw.original_fc;
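
	/* Worked example (illustrative): for a 48 KB FIFO, pba = 48, so
	 * fc_high_water_mark = ((48 * 9216) / 10) & 0xFFF8 = 44232, i.e.
	 * roughly 90% of 48 * 1024 = 49152 bytes with the low three bits
	 * cleared, and fc_low_water sits 8 bytes below it. */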

	/* Allow time for pending master requests to run */
	e1000_reset_hw(&adapter->hw);
	if (adapter->hw.mac_type >= e1000_82544)
		E1000_WRITE_REG(&adapter->hw, WUC, 0);

	if (e1000_init_hw(&adapter->hw))
		DPRINTK(PROBE, ERR, "Hardware Error\n");
	e1000_update_mng_vlan(adapter);

	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
	if (adapter->hw.mac_type >= e1000_82544 &&
	    adapter->hw.mac_type <= e1000_82547_rev_2 &&
	    adapter->hw.autoneg == 1 &&
	    adapter->hw.autoneg_advertised == ADVERTISE_1000_FULL) {
		uint32_t ctrl = E1000_READ_REG(&adapter->hw, CTRL);
		/* clear phy power management bit if we are in gig only mode,
		 * which if enabled will attempt negotiation to 100Mb, which
		 * can cause a loss of link at power off or driver unload */
		ctrl &= ~E1000_CTRL_SWDPIN3;
		E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
	}

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(&adapter->hw);
	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);

	if (!adapter->smart_power_down &&
	    (adapter->hw.mac_type == e1000_82571 ||
	     adapter->hw.mac_type == e1000_82572)) {
		uint16_t phy_data = 0;
		/* speed up time to link by disabling smart power down, ignore
		 * the return value of this function because there is nothing
		 * different we would do if it failed */
		e1000_read_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
				   &phy_data);
		phy_data &= ~IGP02E1000_PM_SPD;
		e1000_write_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
				    phy_data);
	}

	e1000_release_manageability(adapter);
}

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/

static int __devinit
e1000_probe(struct pci_dev *pdev,
	    const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	unsigned long mmio_start, mmio_len;
	unsigned long flash_start, flash_len;

	static int cards_found = 0;
	static int global_quad_port_a = 0; /* global ksp3 port a indication */
	int i, err, pci_using_dac;
	uint16_t eeprom_data = 0;
	uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
	if ((err = pci_enable_device(pdev)))
		return err;

	if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
	    !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) {
		pci_using_dac = 1;
	} else {
		if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) &&
		    (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
			E1000_ERR("No usable DMA configuration, aborting\n");
			goto err_dma;
		}
		pci_using_dac = 0;
	}

	if ((err = pci_request_regions(pdev, e1000_driver_name)))
		goto err_pci_reg;

	pci_set_master(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_MODULE_OWNER(netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.back = adapter;
	adapter->msg_enable = (1 << debug) - 1;

	mmio_start = pci_resource_start(pdev, BAR_0);
	mmio_len = pci_resource_len(pdev, BAR_0);

	err = -EIO;
	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if (!adapter->hw.hw_addr)
		goto err_ioremap;

	for (i = BAR_1; i <= BAR_5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
			adapter->hw.io_base = pci_resource_start(pdev, i);
			break;
		}
	}

	netdev->open = &e1000_open;
	netdev->stop = &e1000_close;
	netdev->hard_start_xmit = &e1000_xmit_frame;
	netdev->get_stats = &e1000_get_stats;
	netdev->set_multicast_list = &e1000_set_multi;
	netdev->set_mac_address = &e1000_set_mac;
	netdev->change_mtu = &e1000_change_mtu;
	netdev->do_ioctl = &e1000_ioctl;
	e1000_set_ethtool_ops(netdev);
	netdev->tx_timeout = &e1000_tx_timeout;
	netdev->watchdog_timeo = 5 * HZ;
#ifdef CONFIG_E1000_NAPI
	netdev->poll = &e1000_clean;
	netdev->weight = 64;
#endif
	netdev->vlan_rx_register = e1000_vlan_rx_register;
	netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
	netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = e1000_netpoll;
#endif
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;
	netdev->base_addr = adapter->hw.io_base;

	adapter->bd_number = cards_found;

	/* setup the private structure */

	if ((err = e1000_sw_init(adapter)))
		goto err_sw_init;

	err = -EIO;
	/* Flash BAR mapping must happen after e1000_sw_init
	 * because it depends on mac_type */
	if ((adapter->hw.mac_type == e1000_ich8lan) &&
	    (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		flash_start = pci_resource_start(pdev, 1);
		flash_len = pci_resource_len(pdev, 1);
		adapter->hw.flash_address = ioremap(flash_start, flash_len);
		if (!adapter->hw.flash_address)
			goto err_flashmap;
	}

	if (e1000_check_phy_reset_block(&adapter->hw))
		DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");

	if (adapter->hw.mac_type >= e1000_82543) {
		netdev->features = NETIF_F_SG |
				   NETIF_F_HW_CSUM |
				   NETIF_F_HW_VLAN_TX |
				   NETIF_F_HW_VLAN_RX |
				   NETIF_F_HW_VLAN_FILTER;
		if (adapter->hw.mac_type == e1000_ich8lan)
			netdev->features &= ~NETIF_F_HW_VLAN_FILTER;
	}

	if ((adapter->hw.mac_type >= e1000_82544) &&
	    (adapter->hw.mac_type != e1000_82547))
		netdev->features |= NETIF_F_TSO;

	if (adapter->hw.mac_type > e1000_82547_rev_2)
		netdev->features |= NETIF_F_TSO6;
	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->features |= NETIF_F_LLTX;

	adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);

	/* initialize eeprom parameters */

	if (e1000_init_eeprom_params(&adapter->hw)) {
		E1000_ERR("EEPROM initialization failed\n");
		goto err_eeprom;
	}

	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state */

	e1000_reset_hw(&adapter->hw);

	/* make sure the EEPROM is good */

	if (e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
		DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
		goto err_eeprom;
	}

	/* copy the MAC address out of the EEPROM */

	if (e1000_read_mac_addr(&adapter->hw))
		DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
	memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
		goto err_eeprom;
	}

	e1000_get_bus_info(&adapter->hw);

	init_timer(&adapter->tx_fifo_stall_timer);
	adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
	adapter->tx_fifo_stall_timer.data = (unsigned long) adapter;

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &e1000_watchdog;
	adapter->watchdog_timer.data = (unsigned long) adapter;

	init_timer(&adapter->phy_info_timer);
	adapter->phy_info_timer.function = &e1000_update_phy_info;
	adapter->phy_info_timer.data = (unsigned long) adapter;

	INIT_WORK(&adapter->reset_task, e1000_reset_task);

	e1000_check_options(adapter);

	/* Initial Wake on LAN setting
	 * If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	switch (adapter->hw.mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
		break;
	case e1000_82544:
		e1000_read_eeprom(&adapter->hw,
			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_82544_APM;
		break;
	case e1000_ich8lan:
		e1000_read_eeprom(&adapter->hw,
			EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_ICH8_APME;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
	case e1000_82571:
	case e1000_80003es2lan:
		if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1) {
			e1000_read_eeprom(&adapter->hw,
				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		}
		/* Fall Through */
	default:
		e1000_read_eeprom(&adapter->hw,
			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
	case E1000_DEV_ID_82571EB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
	case E1000_DEV_ID_82571EB_QUAD_COPPER:
	case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->quad_port_a = 1;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;

	/* print bus type/speed/width info */
	{
	struct e1000_hw *hw = &adapter->hw;
	DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
		((hw->bus_type == e1000_bus_type_pcix) ? "-X" :
		 (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")),
		((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
		 (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
		 (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
		 (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
		 (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
		((hw->bus_width == e1000_bus_width_64) ? "64-bit" :
		 (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
		 (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
		 "32-bit"));
	}

	for (i = 0; i < 6; i++)
		printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');

	/* reset the hardware with the new settings */
	e1000_reset(adapter);

	/* If the controller is 82573 and f/w is AMT, do not set
	 * DRV_LOAD until the interface is up. For all other cases,
	 * let the f/w know that the h/w is now under the control
	 * of the driver. */
	if (adapter->hw.mac_type != e1000_82573 ||
	    !e1000_check_mng_mode(&adapter->hw))
		e1000_get_hw_control(adapter);

	strcpy(netdev->name, "eth%d");
	if ((err = register_netdev(netdev)))
		goto err_register;

	/* tell the stack to leave us alone until e1000_open() is called */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");

	cards_found++;
	return 0;

err_register:
	e1000_release_hw_control(adapter);
err_eeprom:
	if (!e1000_check_phy_reset_block(&adapter->hw))
		e1000_phy_hw_reset(&adapter->hw);

	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
err_flashmap:
#ifdef CONFIG_E1000_NAPI
	for (i = 0; i < adapter->num_rx_queues; i++)
		dev_put(&adapter->polling_netdev[i]);
#endif

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
#ifdef CONFIG_E1000_NAPI
	kfree(adapter->polling_netdev);
#endif
err_sw_init:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/

static void __devexit
e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_E1000_NAPI
	int i;
#endif

	flush_scheduled_work();

	e1000_release_manageability(adapter);

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	e1000_release_hw_control(adapter);

	unregister_netdev(netdev);
#ifdef CONFIG_E1000_NAPI
	for (i = 0; i < adapter->num_rx_queues; i++)
		dev_put(&adapter->polling_netdev[i]);
#endif

	if (!e1000_check_phy_reset_block(&adapter->hw))
		e1000_phy_hw_reset(&adapter->hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
#ifdef CONFIG_E1000_NAPI
	kfree(adapter->polling_netdev);
#endif

	iounmap(adapter->hw.hw_addr);
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
	pci_release_regions(pdev);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/

static int __devinit
e1000_sw_init(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
#ifdef CONFIG_E1000_NAPI
	int i;
#endif

	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);

	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
	hw->max_frame_size = netdev->mtu +
			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

	/* identify the MAC */

	if (e1000_set_mac_type(hw)) {
		DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
		return -EIO;
	}

	switch (hw->mac_type) {
	default:
		break;
	case e1000_82541:
	case e1000_82547:
	case e1000_82541_rev_2:
	case e1000_82547_rev_2:
		hw->phy_init_script = 1;
		break;
	}

	e1000_set_media_type(hw);

	hw->wait_autoneg_complete = FALSE;
	hw->tbi_compatibility_en = TRUE;
	hw->adaptive_ifs = TRUE;

	/* Copper options */

	if (hw->media_type == e1000_media_type_copper) {
		hw->mdix = AUTO_ALL_MODES;
		hw->disable_polarity_correction = FALSE;
		hw->master_slave = E1000_MASTER_SLAVE;
	}

	adapter->num_tx_queues = 1;
	adapter->num_rx_queues = 1;

	if (e1000_alloc_queues(adapter)) {
		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

#ifdef CONFIG_E1000_NAPI
	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->polling_netdev[i].priv = adapter;
		adapter->polling_netdev[i].poll = &e1000_clean;
		adapter->polling_netdev[i].weight = 64;
		dev_hold(&adapter->polling_netdev[i]);
		set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
	}
	spin_lock_init(&adapter->tx_queue_lock);
#endif

	atomic_set(&adapter->irq_sem, 1);
	spin_lock_init(&adapter->stats_lock);

	set_bit(__E1000_DOWN, &adapter->flags);

	return 0;
}

/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time. The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/

static int __devinit
e1000_alloc_queues(struct e1000_adapter *adapter)
{
	int size;

	size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
	adapter->tx_ring = kmalloc(size, GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;
	memset(adapter->tx_ring, 0, size);

	size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
	adapter->rx_ring = kmalloc(size, GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}
	memset(adapter->rx_ring, 0, size);

#ifdef CONFIG_E1000_NAPI
	size = sizeof(struct net_device) * adapter->num_rx_queues;
	adapter->polling_netdev = kmalloc(size, GFP_KERNEL);
	if (!adapter->polling_netdev) {
		kfree(adapter->tx_ring);
		kfree(adapter->rx_ring);
		return -ENOMEM;
	}
	memset(adapter->polling_netdev, 0, size);
#endif

	return E1000_SUCCESS;
}

/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/

static int
e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_TESTING, &adapter->flags))
		return -EBUSY;

	/* allocate transmit descriptors */
	if ((err = e1000_setup_all_tx_resources(adapter)))
		goto err_setup_tx;

	/* allocate receive descriptors */
	if ((err = e1000_setup_all_rx_resources(adapter)))
		goto err_setup_rx;

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	e1000_power_up_phy(adapter);

	if ((err = e1000_up(adapter)))
		goto err_up;
	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}

	/* If AMT is enabled, let the firmware know that the network
	 * interface is now open */
	if (adapter->hw.mac_type == e1000_82573 &&
	    e1000_check_mng_mode(&adapter->hw))
		e1000_get_hw_control(adapter);

	return E1000_SUCCESS;

err_up:
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);
err_req_irq:
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}

/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/

static int
e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
	e1000_down(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);

	e1000_free_all_tx_resources(adapter);
	e1000_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it) */
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    !(adapter->vlgrp &&
	      adapter->vlgrp->vlan_devices[adapter->mng_vlan_id])) {
		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
	}

	/* If AMT is enabled, let the firmware know that the network
	 * interface is now closed */
	if (adapter->hw.mac_type == e1000_82573 &&
	    e1000_check_mng_mode(&adapter->hw))
		e1000_release_hw_control(adapter);

	return 0;
}

/**
 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
 * @adapter: address of board private structure
 * @start: address of beginning of memory
 * @len: length of memory
 **/
static boolean_t
e1000_check_64k_bound(struct e1000_adapter *adapter,
		      void *start, unsigned long len)
{
	unsigned long begin = (unsigned long) start;
	unsigned long end = begin + len;

	/* First rev 82545 and 82546 need to not allow any memory
	 * write location to cross 64k boundary due to errata 23 */
	if (adapter->hw.mac_type == e1000_82545 ||
	    adapter->hw.mac_type == e1000_82546) {
		return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE;
	}

	return TRUE;
}
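
/* Example of the check above (illustrative): begin = 0xFF00 and
 * len = 0x200 give end - 1 = 0x100FF; begin ^ (end - 1) = 0x1FFFF,
 * whose upper bits survive the >> 16, so the buffer crosses a 64 KB
 * boundary and FALSE is returned on the affected parts.
 */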

/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/

static int
e1000_setup_tx_resources(struct e1000_adapter *adapter,
			 struct e1000_tx_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct e1000_buffer) * txdr->count;
	txdr->buffer_info = vmalloc(size);
	if (!txdr->buffer_info) {
		DPRINTK(PROBE, ERR,
			"Unable to allocate memory for the transmit descriptor ring\n");
		return -ENOMEM;
	}
	memset(txdr->buffer_info, 0, size);

	/* round up to nearest 4K */

	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	E1000_ROUNDUP(txdr->size, 4096);

	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
	if (!txdr->desc) {
setup_tx_desc_die:
		vfree(txdr->buffer_info);
		DPRINTK(PROBE, ERR,
			"Unable to allocate memory for the transmit descriptor ring\n");
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
		void *olddesc = txdr->desc;
		dma_addr_t olddma = txdr->dma;
		DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
			"at %p\n", txdr->size, txdr->desc);
		/* Try again, without freeing the previous */
		txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
		/* Failed allocation, critical failure */
		if (!txdr->desc) {
			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
			goto setup_tx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
			/* give up */
			pci_free_consistent(pdev, txdr->size, txdr->desc,
					    txdr->dma);
			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
			DPRINTK(PROBE, ERR,
				"Unable to allocate aligned memory "
				"for the transmit descriptor ring\n");
			vfree(txdr->buffer_info);
			return -ENOMEM;
		} else {
			/* Free old allocation, new allocation was successful */
			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
		}
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;
	spin_lock_init(&txdr->tx_lock);

	return 0;
}

/**
 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
 * 				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/

int
e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (err) {
			DPRINTK(PROBE, ERR,
				"Allocation for Tx Queue %u failed\n", i);
			for (i-- ; i >= 0; i--)
				e1000_free_tx_resources(adapter,
							&adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/

static void
e1000_configure_tx(struct e1000_adapter *adapter)
{
	uint64_t tdba;
	struct e1000_hw *hw = &adapter->hw;
	uint32_t tdlen, tctl, tipg, tarc;
	uint32_t ipgr1, ipgr2;

	/* Setup the HW Tx Head and Tail descriptor pointers */

	switch (adapter->num_tx_queues) {
	case 1:
	default:
		tdba = adapter->tx_ring[0].dma;
		tdlen = adapter->tx_ring[0].count *
			sizeof(struct e1000_tx_desc);
		E1000_WRITE_REG(hw, TDLEN, tdlen);
		E1000_WRITE_REG(hw, TDBAH, (tdba >> 32));
		E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
		E1000_WRITE_REG(hw, TDT, 0);
		E1000_WRITE_REG(hw, TDH, 0);
		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDH : E1000_82542_TDH);
		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDT : E1000_82542_TDT);
		break;
	}

	/* Set the default values for the Tx Inter Packet Gap timer */
	if (adapter->hw.mac_type <= e1000_82547_rev_2 &&
	    (hw->media_type == e1000_media_type_fiber ||
	     hw->media_type == e1000_media_type_internal_serdes))
		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
	else
		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
		tipg = DEFAULT_82542_TIPG_IPGT;
		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
		break;
	case e1000_80003es2lan:
		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
		ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2;
		break;
	default:
		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
		break;
	}
	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
	E1000_WRITE_REG(hw, TIPG, tipg);
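
	/* The TIPG register packs three fields: the IPGT transmit gap in
	 * the low bits, with IPGR1 and IPGR2 OR'd in at their shift
	 * offsets (10 and 20 on 8254x parts, if the usual definitions of
	 * E1000_TIPG_IPGR1_SHIFT/IPGR2_SHIFT apply). */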

	/* Set the Tx Interrupt Delay register */

	E1000_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
	if (hw->mac_type >= e1000_82540)
		E1000_WRITE_REG(hw, TADV, adapter->tx_abs_int_delay);

	/* Program the Transmit Control Register */

	tctl = E1000_READ_REG(hw, TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
		tarc = E1000_READ_REG(hw, TARC0);
		/* set the speed mode bit, we'll clear it if we're not at
		 * gigabit link later */
		tarc |= (1 << 21);
		E1000_WRITE_REG(hw, TARC0, tarc);
	} else if (hw->mac_type == e1000_80003es2lan) {
		tarc = E1000_READ_REG(hw, TARC0);
		tarc |= 1;
		E1000_WRITE_REG(hw, TARC0, tarc);
		tarc = E1000_READ_REG(hw, TARC1);
		tarc |= 1;
		E1000_WRITE_REG(hw, TARC1, tarc);
	}

	e1000_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;

	/* only set IDE if we are delaying interrupts using the timers */
	if (adapter->tx_int_delay)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	if (hw->mac_type < e1000_82543)
		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
	else
		adapter->txd_cmd |= E1000_TXD_CMD_RS;

	/* Cache if we're 82544 running in PCI-X because we'll
	 * need this to apply a workaround later in the send path. */
	if (hw->mac_type == e1000_82544 &&
	    hw->bus_type == e1000_bus_type_pcix)
		adapter->pcix_82544 = 1;

	E1000_WRITE_REG(hw, TCTL, tctl);

}

/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/

static int
e1000_setup_rx_resources(struct e1000_adapter *adapter,
			 struct e1000_rx_ring *rxdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct e1000_buffer) * rxdr->count;
	rxdr->buffer_info = vmalloc(size);
	if (!rxdr->buffer_info) {
		DPRINTK(PROBE, ERR,
			"Unable to allocate memory for the receive descriptor ring\n");
		return -ENOMEM;
	}
	memset(rxdr->buffer_info, 0, size);

	size = sizeof(struct e1000_ps_page) * rxdr->count;
	rxdr->ps_page = kmalloc(size, GFP_KERNEL);
	if (!rxdr->ps_page) {
		vfree(rxdr->buffer_info);
		DPRINTK(PROBE, ERR,
			"Unable to allocate memory for the receive descriptor ring\n");
		return -ENOMEM;
	}
	memset(rxdr->ps_page, 0, size);

	size = sizeof(struct e1000_ps_page_dma) * rxdr->count;
	rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL);
	if (!rxdr->ps_page_dma) {
		vfree(rxdr->buffer_info);
		kfree(rxdr->ps_page);
		DPRINTK(PROBE, ERR,
			"Unable to allocate memory for the receive descriptor ring\n");
		return -ENOMEM;
	}
	memset(rxdr->ps_page_dma, 0, size);

	if (adapter->hw.mac_type <= e1000_82547_rev_2)
		desc_len = sizeof(struct e1000_rx_desc);
	else
		desc_len = sizeof(union e1000_rx_desc_packet_split);

	/* Round up to nearest 4K */

	rxdr->size = rxdr->count * desc_len;
	E1000_ROUNDUP(rxdr->size, 4096);

	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);

	if (!rxdr->desc) {
		DPRINTK(PROBE, ERR,
			"Unable to allocate memory for the receive descriptor ring\n");
setup_rx_desc_die:
		vfree(rxdr->buffer_info);
		kfree(rxdr->ps_page);
		kfree(rxdr->ps_page_dma);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
		void *olddesc = rxdr->desc;
		dma_addr_t olddma = rxdr->dma;
		DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
			"at %p\n", rxdr->size, rxdr->desc);
		/* Try again, without freeing the previous */
		rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
		/* Failed allocation, critical failure */
		if (!rxdr->desc) {
			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
			DPRINTK(PROBE, ERR,
				"Unable to allocate memory "
				"for the receive descriptor ring\n");
			goto setup_rx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
			/* give up */
			pci_free_consistent(pdev, rxdr->size, rxdr->desc,
					    rxdr->dma);
			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
			DPRINTK(PROBE, ERR,
				"Unable to allocate aligned memory "
				"for the receive descriptor ring\n");
			goto setup_rx_desc_die;
		} else {
			/* Free old allocation, new allocation was successful */
			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
		}
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;

	return 0;
}
1845 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1846 * (Descriptors) for all queues
1847 * @adapter: board private structure
1849 * Return 0 on success, negative on failure
1853 e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1855 int i, err = 0;
1857 for (i = 0; i < adapter->num_rx_queues; i++) {
1858 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1859 if (err) {
1860 DPRINTK(PROBE, ERR,
1861 "Allocation for Rx Queue %u failed\n", i);
1862 for (i-- ; i >= 0; i--)
1863 e1000_free_rx_resources(adapter,
1864 &adapter->rx_ring[i]);
1865 break;
1869 return err;
1873 * e1000_setup_rctl - configure the receive control registers
1874 * @adapter: board private structure
1876 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
1877 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
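/* e.g. with 4 KB pages, PAGE_USE_COUNT(9018) = (9018 >> PAGE_SHIFT) + 1 = 3,
 * since 9018 bytes span two full pages plus an 826-byte tail. */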
1878 static void
1879 e1000_setup_rctl(struct e1000_adapter *adapter)
1881 uint32_t rctl, rfctl;
1882 uint32_t psrctl = 0;
1883 #ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
1884 uint32_t pages = 0;
1885 #endif
1887 rctl = E1000_READ_REG(&adapter->hw, RCTL);
1889 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1891 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
1892 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
1893 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
1895 if (adapter->hw.tbi_compatibility_on == 1)
1896 rctl |= E1000_RCTL_SBP;
1897 else
1898 rctl &= ~E1000_RCTL_SBP;
1900 if (adapter->netdev->mtu <= ETH_DATA_LEN)
1901 rctl &= ~E1000_RCTL_LPE;
1902 else
1903 rctl |= E1000_RCTL_LPE;
1905 /* Setup buffer sizes */
1906 rctl &= ~E1000_RCTL_SZ_4096;
1907 rctl |= E1000_RCTL_BSEX;
1908 switch (adapter->rx_buffer_len) {
1909 case E1000_RXBUFFER_256:
1910 rctl |= E1000_RCTL_SZ_256;
1911 rctl &= ~E1000_RCTL_BSEX;
1912 break;
1913 case E1000_RXBUFFER_512:
1914 rctl |= E1000_RCTL_SZ_512;
1915 rctl &= ~E1000_RCTL_BSEX;
1916 break;
1917 case E1000_RXBUFFER_1024:
1918 rctl |= E1000_RCTL_SZ_1024;
1919 rctl &= ~E1000_RCTL_BSEX;
1920 break;
1921 case E1000_RXBUFFER_2048:
1922 default:
1923 rctl |= E1000_RCTL_SZ_2048;
1924 rctl &= ~E1000_RCTL_BSEX;
1925 break;
1926 case E1000_RXBUFFER_4096:
1927 rctl |= E1000_RCTL_SZ_4096;
1928 break;
1929 case E1000_RXBUFFER_8192:
1930 rctl |= E1000_RCTL_SZ_8192;
1931 break;
1932 case E1000_RXBUFFER_16384:
1933 rctl |= E1000_RCTL_SZ_16384;
1934 break;
1937 #ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
1938 /* 82571 and greater support packet-split where the protocol
1939 * header is placed in skb->data and the packet data is
1940 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
1941 * In the case of a non-split, skb->data is linearly filled,
1942 * followed by the page buffers. Therefore, skb->data is
1943 * sized to hold the largest protocol header.
1945 /* allocations using alloc_page take too long for regular MTU
1946 * so only enable packet split for jumbo frames */
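/* Example (illustrative): a 9000-byte MTU on a 4 KB-page system gives
 * PAGE_USE_COUNT(9000) = 3, which passes the (pages <= 3) test below,
 * so packet split is enabled once LPE is set. */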
1947 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
1948 if ((adapter->hw.mac_type >= e1000_82571) && (pages <= 3) &&
1949 PAGE_SIZE <= 16384 && (rctl & E1000_RCTL_LPE))
1950 adapter->rx_ps_pages = pages;
1951 else
1952 adapter->rx_ps_pages = 0;
1953 #endif
1954 if (adapter->rx_ps_pages) {
1955 /* Configure extra packet-split registers */
1956 rfctl = E1000_READ_REG(&adapter->hw, RFCTL);
1957 rfctl |= E1000_RFCTL_EXTEN;
1958 /* disable packet split support for IPv6 extension headers,
1959 * because some malformed IPv6 headers can hang the RX */
1960 rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
1961 E1000_RFCTL_NEW_IPV6_EXT_DIS);
1963 E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);
1965 rctl |= E1000_RCTL_DTYP_PS;
1967 psrctl |= adapter->rx_ps_bsize0 >>
1968 E1000_PSRCTL_BSIZE0_SHIFT;
1970 switch (adapter->rx_ps_pages) {
1971 case 3:
1972 psrctl |= PAGE_SIZE <<
1973 E1000_PSRCTL_BSIZE3_SHIFT;
1974 case 2:
1975 psrctl |= PAGE_SIZE <<
1976 E1000_PSRCTL_BSIZE2_SHIFT;
1977 case 1:
1978 psrctl |= PAGE_SIZE >>
1979 E1000_PSRCTL_BSIZE1_SHIFT;
1980 break;
1983 E1000_WRITE_REG(&adapter->hw, PSRCTL, psrctl);
1986 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
1990 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1991 * @adapter: board private structure
1993 * Configure the Rx unit of the MAC after a reset.
1996 static void
1997 e1000_configure_rx(struct e1000_adapter *adapter)
1999 uint64_t rdba;
2000 struct e1000_hw *hw = &adapter->hw;
2001 uint32_t rdlen, rctl, rxcsum, ctrl_ext;
2003 if (adapter->rx_ps_pages) {
2004 /* this is a 32 byte descriptor */
2005 rdlen = adapter->rx_ring[0].count *
2006 sizeof(union e1000_rx_desc_packet_split);
2007 adapter->clean_rx = e1000_clean_rx_irq_ps;
2008 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
2009 } else {
2010 rdlen = adapter->rx_ring[0].count *
2011 sizeof(struct e1000_rx_desc);
2012 adapter->clean_rx = e1000_clean_rx_irq;
2013 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
2016 /* disable receives while setting up the descriptors */
2017 rctl = E1000_READ_REG(hw, RCTL);
2018 E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
2020 /* set the Receive Delay Timer Register */
2021 E1000_WRITE_REG(hw, RDTR, adapter->rx_int_delay);
2023 if (hw->mac_type >= e1000_82540) {
2024 E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
2025 if (adapter->itr_setting != 0)
2026 E1000_WRITE_REG(hw, ITR,
2027 1000000000 / (adapter->itr * 256));
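/* ITR holds the minimum inter-interrupt interval in 256 ns units, so
 * for adapter->itr = 8000 ints/s this writes 1000000000 / (8000 * 256)
 * ~= 488, i.e. roughly 125 us between interrupts. */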
2030 if (hw->mac_type >= e1000_82571) {
2031 ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
2032 /* Reset delay timers after every interrupt */
2033 ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
2034 #ifdef CONFIG_E1000_NAPI
2035 /* Auto-Mask interrupts upon ICR access */
2036 ctrl_ext |= E1000_CTRL_EXT_IAME;
2037 E1000_WRITE_REG(hw, IAM, 0xffffffff);
2038 #endif
2039 E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
2040 E1000_WRITE_FLUSH(hw);
2043 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2044 * the Base and Length of the Rx Descriptor Ring */
2045 switch (adapter->num_rx_queues) {
2046 case 1:
2047 default:
2048 rdba = adapter->rx_ring[0].dma;
2049 E1000_WRITE_REG(hw, RDLEN, rdlen);
2050 E1000_WRITE_REG(hw, RDBAH, (rdba >> 32));
2051 E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
2052 E1000_WRITE_REG(hw, RDT, 0);
2053 E1000_WRITE_REG(hw, RDH, 0);
2054 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
2055 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
2056 break;
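/* Illustrative: rdba is the ring's 64-bit DMA address split across the
 * base-address pair, e.g. rdba = 0x0000123456789000 puts 0x1234 in
 * RDBAH and 0x56789000 in RDBAL; RDH and RDT both start at 0 (no
 * buffers posted) until the allocator bumps the tail. */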
2059 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
2060 if (hw->mac_type >= e1000_82543) {
2061 rxcsum = E1000_READ_REG(hw, RXCSUM);
2062 if (adapter->rx_csum == TRUE) {
2063 rxcsum |= E1000_RXCSUM_TUOFL;
2065 /* Enable 82571 IPv4 payload checksum for UDP fragments
2066 * Must be used in conjunction with packet-split. */
2067 if ((hw->mac_type >= e1000_82571) &&
2068 (adapter->rx_ps_pages)) {
2069 rxcsum |= E1000_RXCSUM_IPPCSE;
2071 } else {
2072 rxcsum &= ~E1000_RXCSUM_TUOFL;
2073 /* don't need to clear IPPCSE as it defaults to 0 */
2075 E1000_WRITE_REG(hw, RXCSUM, rxcsum);
2078 /* enable early receives on 82573, only takes effect if using > 2048
2079 * byte total frame size, i.e. effectively only for jumbo frames */
2080 #define E1000_ERT_2048 0x100
2081 if (hw->mac_type == e1000_82573)
2082 E1000_WRITE_REG(hw, ERT, E1000_ERT_2048);
2084 /* Enable Receives */
2085 E1000_WRITE_REG(hw, RCTL, rctl);
2089 * e1000_free_tx_resources - Free Tx Resources per Queue
2090 * @adapter: board private structure
2091 * @tx_ring: Tx descriptor ring for a specific queue
2093 * Free all transmit software resources
2096 static void
2097 e1000_free_tx_resources(struct e1000_adapter *adapter,
2098 struct e1000_tx_ring *tx_ring)
2100 struct pci_dev *pdev = adapter->pdev;
2102 e1000_clean_tx_ring(adapter, tx_ring);
2104 vfree(tx_ring->buffer_info);
2105 tx_ring->buffer_info = NULL;
2107 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
2109 tx_ring->desc = NULL;
2113 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
2114 * @adapter: board private structure
2116 * Free all transmit software resources
2119 void
2120 e1000_free_all_tx_resources(struct e1000_adapter *adapter)
2122 int i;
2124 for (i = 0; i < adapter->num_tx_queues; i++)
2125 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
2128 static void
2129 e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
2130 struct e1000_buffer *buffer_info)
2132 if (buffer_info->dma) {
2133 pci_unmap_page(adapter->pdev,
2134 buffer_info->dma,
2135 buffer_info->length,
2136 PCI_DMA_TODEVICE);
2137 buffer_info->dma = 0;
2139 if (buffer_info->skb) {
2140 dev_kfree_skb_any(buffer_info->skb);
2141 buffer_info->skb = NULL;
2143 /* buffer_info must be completely set up in the transmit path */
2147 * e1000_clean_tx_ring - Free Tx Buffers
2148 * @adapter: board private structure
2149 * @tx_ring: ring to be cleaned
2152 static void
2153 e1000_clean_tx_ring(struct e1000_adapter *adapter,
2154 struct e1000_tx_ring *tx_ring)
2156 struct e1000_buffer *buffer_info;
2157 unsigned long size;
2158 unsigned int i;
2160 /* Free all the Tx ring sk_buffs */
2162 for (i = 0; i < tx_ring->count; i++) {
2163 buffer_info = &tx_ring->buffer_info[i];
2164 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2167 size = sizeof(struct e1000_buffer) * tx_ring->count;
2168 memset(tx_ring->buffer_info, 0, size);
2170 /* Zero out the descriptor ring */
2172 memset(tx_ring->desc, 0, tx_ring->size);
2174 tx_ring->next_to_use = 0;
2175 tx_ring->next_to_clean = 0;
2176 tx_ring->last_tx_tso = 0;
2178 writel(0, adapter->hw.hw_addr + tx_ring->tdh);
2179 writel(0, adapter->hw.hw_addr + tx_ring->tdt);
2183 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2184 * @adapter: board private structure
2187 static void
2188 e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2190 int i;
2192 for (i = 0; i < adapter->num_tx_queues; i++)
2193 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2197 * e1000_free_rx_resources - Free Rx Resources
2198 * @adapter: board private structure
2199 * @rx_ring: ring to clean the resources from
2201 * Free all receive software resources
2204 static void
2205 e1000_free_rx_resources(struct e1000_adapter *adapter,
2206 struct e1000_rx_ring *rx_ring)
2208 struct pci_dev *pdev = adapter->pdev;
2210 e1000_clean_rx_ring(adapter, rx_ring);
2212 vfree(rx_ring->buffer_info);
2213 rx_ring->buffer_info = NULL;
2214 kfree(rx_ring->ps_page);
2215 rx_ring->ps_page = NULL;
2216 kfree(rx_ring->ps_page_dma);
2217 rx_ring->ps_page_dma = NULL;
2219 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
2221 rx_ring->desc = NULL;
2225 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2226 * @adapter: board private structure
2228 * Free all receive software resources
2231 void
2232 e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2234 int i;
2236 for (i = 0; i < adapter->num_rx_queues; i++)
2237 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2241 * e1000_clean_rx_ring - Free Rx Buffers per Queue
2242 * @adapter: board private structure
2243 * @rx_ring: ring to free buffers from
2246 static void
2247 e1000_clean_rx_ring(struct e1000_adapter *adapter,
2248 struct e1000_rx_ring *rx_ring)
2250 struct e1000_buffer *buffer_info;
2251 struct e1000_ps_page *ps_page;
2252 struct e1000_ps_page_dma *ps_page_dma;
2253 struct pci_dev *pdev = adapter->pdev;
2254 unsigned long size;
2255 unsigned int i, j;
2257 /* Free all the Rx ring sk_buffs */
2258 for (i = 0; i < rx_ring->count; i++) {
2259 buffer_info = &rx_ring->buffer_info[i];
2260 if (buffer_info->skb) {
2261 pci_unmap_single(pdev,
2262 buffer_info->dma,
2263 buffer_info->length,
2264 PCI_DMA_FROMDEVICE);
2266 dev_kfree_skb(buffer_info->skb);
2267 buffer_info->skb = NULL;
2269 ps_page = &rx_ring->ps_page[i];
2270 ps_page_dma = &rx_ring->ps_page_dma[i];
2271 for (j = 0; j < adapter->rx_ps_pages; j++) {
2272 if (!ps_page->ps_page[j]) break;
2273 pci_unmap_page(pdev,
2274 ps_page_dma->ps_page_dma[j],
2275 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2276 ps_page_dma->ps_page_dma[j] = 0;
2277 put_page(ps_page->ps_page[j]);
2278 ps_page->ps_page[j] = NULL;
2282 size = sizeof(struct e1000_buffer) * rx_ring->count;
2283 memset(rx_ring->buffer_info, 0, size);
2284 size = sizeof(struct e1000_ps_page) * rx_ring->count;
2285 memset(rx_ring->ps_page, 0, size);
2286 size = sizeof(struct e1000_ps_page_dma) * rx_ring->count;
2287 memset(rx_ring->ps_page_dma, 0, size);
2289 /* Zero out the descriptor ring */
2291 memset(rx_ring->desc, 0, rx_ring->size);
2293 rx_ring->next_to_clean = 0;
2294 rx_ring->next_to_use = 0;
2296 writel(0, adapter->hw.hw_addr + rx_ring->rdh);
2297 writel(0, adapter->hw.hw_addr + rx_ring->rdt);
2301 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2302 * @adapter: board private structure
2305 static void
2306 e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2308 int i;
2310 for (i = 0; i < adapter->num_rx_queues; i++)
2311 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2314 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2315 * and memory write and invalidate disabled for certain operations
2317 static void
2318 e1000_enter_82542_rst(struct e1000_adapter *adapter)
2320 struct net_device *netdev = adapter->netdev;
2321 uint32_t rctl;
2323 e1000_pci_clear_mwi(&adapter->hw);
2325 rctl = E1000_READ_REG(&adapter->hw, RCTL);
2326 rctl |= E1000_RCTL_RST;
2327 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
2328 E1000_WRITE_FLUSH(&adapter->hw);
2329 mdelay(5);
2331 if (netif_running(netdev))
2332 e1000_clean_all_rx_rings(adapter);
2335 static void
2336 e1000_leave_82542_rst(struct e1000_adapter *adapter)
2338 struct net_device *netdev = adapter->netdev;
2339 uint32_t rctl;
2341 rctl = E1000_READ_REG(&adapter->hw, RCTL);
2342 rctl &= ~E1000_RCTL_RST;
2343 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
2344 E1000_WRITE_FLUSH(&adapter->hw);
2345 mdelay(5);
2347 if (adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
2348 e1000_pci_set_mwi(&adapter->hw);
2350 if (netif_running(netdev)) {
2351 /* No need to loop, because 82542 supports only 1 queue */
2352 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2353 e1000_configure_rx(adapter);
2354 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2359 * e1000_set_mac - Change the Ethernet Address of the NIC
2360 * @netdev: network interface device structure
2361 * @p: pointer to an address structure
2363 * Returns 0 on success, negative on failure
2366 static int
2367 e1000_set_mac(struct net_device *netdev, void *p)
2369 struct e1000_adapter *adapter = netdev_priv(netdev);
2370 struct sockaddr *addr = p;
2372 if (!is_valid_ether_addr(addr->sa_data))
2373 return -EADDRNOTAVAIL;
2375 /* 82542 2.0 needs to be in reset to write receive address registers */
2377 if (adapter->hw.mac_type == e1000_82542_rev2_0)
2378 e1000_enter_82542_rst(adapter);
2380 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2381 memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
2383 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
2385 /* With 82571 controllers, LAA may be overwritten (with the default)
2386 * due to controller reset from the other port. */
2387 if (adapter->hw.mac_type == e1000_82571) {
2388 /* activate the work around */
2389 adapter->hw.laa_is_present = 1;
2391 /* Hold a copy of the LAA in RAR[14]. This is done so that
2392 * between the time RAR[0] gets clobbered and the time it
2393 * gets fixed (in e1000_watchdog), the actual LAA is in one
2394 * of the RARs and no incoming packets directed to this port
2395 * are dropped. Eventually the LAA will be in RAR[0] and
2396 * RAR[14] */
2397 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr,
2398 E1000_RAR_ENTRIES - 1);
2401 if (adapter->hw.mac_type == e1000_82542_rev2_0)
2402 e1000_leave_82542_rst(adapter);
2404 return 0;
2408 * e1000_set_multi - Multicast and Promiscuous mode set
2409 * @netdev: network interface device structure
2411 * The set_multi entry point is called whenever the multicast address
2412 * list or the network interface flags are updated. This routine is
2413 * responsible for configuring the hardware for proper multicast,
2414 * promiscuous mode, and all-multi behavior.
2417 static void
2418 e1000_set_multi(struct net_device *netdev)
2420 struct e1000_adapter *adapter = netdev_priv(netdev);
2421 struct e1000_hw *hw = &adapter->hw;
2422 struct dev_mc_list *mc_ptr;
2423 uint32_t rctl;
2424 uint32_t hash_value;
2425 int i, rar_entries = E1000_RAR_ENTRIES;
2426 int mta_reg_count = (hw->mac_type == e1000_ich8lan) ?
2427 E1000_NUM_MTA_REGISTERS_ICH8LAN :
2428 E1000_NUM_MTA_REGISTERS;
2430 if (adapter->hw.mac_type == e1000_ich8lan)
2431 rar_entries = E1000_RAR_ENTRIES_ICH8LAN;
2433 /* reserve RAR[14] for LAA over-write work-around */
2434 if (adapter->hw.mac_type == e1000_82571)
2435 rar_entries--;
2437 /* Check for Promiscuous and All Multicast modes */
2439 rctl = E1000_READ_REG(hw, RCTL);
2441 if (netdev->flags & IFF_PROMISC) {
2442 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2443 } else if (netdev->flags & IFF_ALLMULTI) {
2444 rctl |= E1000_RCTL_MPE;
2445 rctl &= ~E1000_RCTL_UPE;
2446 } else {
2447 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
2450 E1000_WRITE_REG(hw, RCTL, rctl);
2452 /* 82542 2.0 needs to be in reset to write receive address registers */
2454 if (hw->mac_type == e1000_82542_rev2_0)
2455 e1000_enter_82542_rst(adapter);
2457 /* load the first 14 multicast addresses into the exact filters 1-14
2458 * RAR 0 is used for the station MAC address
2459 * if there are not 14 addresses, go ahead and clear the filters
2460 * -- with 82571 controllers only 0-13 entries are filled here
2462 mc_ptr = netdev->mc_list;
2464 for (i = 1; i < rar_entries; i++) {
2465 if (mc_ptr) {
2466 e1000_rar_set(hw, mc_ptr->dmi_addr, i);
2467 mc_ptr = mc_ptr->next;
2468 } else {
2469 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2470 E1000_WRITE_FLUSH(hw);
2471 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2472 E1000_WRITE_FLUSH(hw);
2476 /* clear the old settings from the multicast hash table */
2478 for (i = 0; i < mta_reg_count; i++) {
2479 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
2480 E1000_WRITE_FLUSH(hw);
2483 /* load any remaining addresses into the hash table */
2485 for (; mc_ptr; mc_ptr = mc_ptr->next) {
2486 hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr);
2487 e1000_mta_set(hw, hash_value);
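/* Sketch of the hash layout (assuming the usual e1000 scheme): the
 * 12-bit hash_value indexes a 4096-bit table spread over 128 32-bit
 * MTA registers, roughly:
 *     mta_reg = (hash_value >> 5) & 0x7F;
 *     mta_bit = hash_value & 0x1F;
 */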
2490 if (hw->mac_type == e1000_82542_rev2_0)
2491 e1000_leave_82542_rst(adapter);
2494 /* Need to wait a few seconds after link up to get diagnostic information from
2495 * the phy */
2497 static void
2498 e1000_update_phy_info(unsigned long data)
2500 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
2501 e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2505 * e1000_82547_tx_fifo_stall - Timer Call-back
2506 * @data: pointer to adapter cast into an unsigned long
2509 static void
2510 e1000_82547_tx_fifo_stall(unsigned long data)
2512 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
2513 struct net_device *netdev = adapter->netdev;
2514 uint32_t tctl;
2516 if (atomic_read(&adapter->tx_fifo_stall)) {
2517 if ((E1000_READ_REG(&adapter->hw, TDT) ==
2518 E1000_READ_REG(&adapter->hw, TDH)) &&
2519 (E1000_READ_REG(&adapter->hw, TDFT) ==
2520 E1000_READ_REG(&adapter->hw, TDFH)) &&
2521 (E1000_READ_REG(&adapter->hw, TDFTS) ==
2522 E1000_READ_REG(&adapter->hw, TDFHS))) {
2523 tctl = E1000_READ_REG(&adapter->hw, TCTL);
2524 E1000_WRITE_REG(&adapter->hw, TCTL,
2525 tctl & ~E1000_TCTL_EN);
2526 E1000_WRITE_REG(&adapter->hw, TDFT,
2527 adapter->tx_head_addr);
2528 E1000_WRITE_REG(&adapter->hw, TDFH,
2529 adapter->tx_head_addr);
2530 E1000_WRITE_REG(&adapter->hw, TDFTS,
2531 adapter->tx_head_addr);
2532 E1000_WRITE_REG(&adapter->hw, TDFHS,
2533 adapter->tx_head_addr);
2534 E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
2535 E1000_WRITE_FLUSH(&adapter->hw);
2537 adapter->tx_fifo_head = 0;
2538 atomic_set(&adapter->tx_fifo_stall, 0);
2539 netif_wake_queue(netdev);
2540 } else {
2541 mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
2547 * e1000_watchdog - Timer Call-back
2548 * @data: pointer to adapter cast into an unsigned long
2550 static void
2551 e1000_watchdog(unsigned long data)
2553 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
2554 struct net_device *netdev = adapter->netdev;
2555 struct e1000_tx_ring *txdr = adapter->tx_ring;
2556 uint32_t link, tctl;
2557 int32_t ret_val;
2559 ret_val = e1000_check_for_link(&adapter->hw);
2560 if ((ret_val == E1000_ERR_PHY) &&
2561 (adapter->hw.phy_type == e1000_phy_igp_3) &&
2562 (E1000_READ_REG(&adapter->hw, CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
2563 /* See e1000_kumeran_lock_loss_workaround() */
2564 DPRINTK(LINK, INFO,
2565 "Gigabit has been disabled, downgrading speed\n");
2568 if (adapter->hw.mac_type == e1000_82573) {
2569 e1000_enable_tx_pkt_filtering(&adapter->hw);
2570 if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
2571 e1000_update_mng_vlan(adapter);
2574 if ((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
2575 !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE))
2576 link = !adapter->hw.serdes_link_down;
2577 else
2578 link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;
2580 if (link) {
2581 if (!netif_carrier_ok(netdev)) {
2582 uint32_t ctrl;
2583 boolean_t txb2b = 1;
2584 e1000_get_speed_and_duplex(&adapter->hw,
2585 &adapter->link_speed,
2586 &adapter->link_duplex);
2588 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2589 DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s, "
2590 "Flow Control: %s\n",
2591 adapter->link_speed,
2592 adapter->link_duplex == FULL_DUPLEX ?
2593 "Full Duplex" : "Half Duplex",
2594 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2595 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2596 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2597 E1000_CTRL_TFCE) ? "TX" : "None" )));
2599 /* tweak tx_queue_len according to speed/duplex
2600 * and adjust the timeout factor */
2601 netdev->tx_queue_len = adapter->tx_queue_len;
2602 adapter->tx_timeout_factor = 1;
2603 switch (adapter->link_speed) {
2604 case SPEED_10:
2605 txb2b = 0;
2606 netdev->tx_queue_len = 10;
2607 adapter->tx_timeout_factor = 8;
2608 break;
2609 case SPEED_100:
2610 txb2b = 0;
2611 netdev->tx_queue_len = 100;
2612 /* maybe add some timeout factor? */
2613 break;
2616 if ((adapter->hw.mac_type == e1000_82571 ||
2617 adapter->hw.mac_type == e1000_82572) &&
2618 txb2b == 0) {
2619 uint32_t tarc0;
2620 tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
2621 tarc0 &= ~(1 << 21);
2622 E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
2625 /* disable TSO for pcie and 10/100 speeds, to avoid
2626 * some hardware issues */
2627 if (!adapter->tso_force &&
2628 adapter->hw.bus_type == e1000_bus_type_pci_express){
2629 switch (adapter->link_speed) {
2630 case SPEED_10:
2631 case SPEED_100:
2632 DPRINTK(PROBE,INFO,
2633 "10/100 speed: disabling TSO\n");
2634 netdev->features &= ~NETIF_F_TSO;
2635 netdev->features &= ~NETIF_F_TSO6;
2636 break;
2637 case SPEED_1000:
2638 netdev->features |= NETIF_F_TSO;
2639 netdev->features |= NETIF_F_TSO6;
2640 break;
2641 default:
2642 /* oops */
2643 break;
2647 /* enable transmits in the hardware, need to do this
2648 * after setting TARC0 */
2649 tctl = E1000_READ_REG(&adapter->hw, TCTL);
2650 tctl |= E1000_TCTL_EN;
2651 E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
2653 netif_carrier_on(netdev);
2654 netif_wake_queue(netdev);
2655 mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
2656 adapter->smartspeed = 0;
2657 } else {
2658 /* make sure the receive unit is started */
2659 if (adapter->hw.rx_needs_kicking) {
2660 struct e1000_hw *hw = &adapter->hw;
2661 uint32_t rctl = E1000_READ_REG(hw, RCTL);
2662 E1000_WRITE_REG(hw, RCTL, rctl | E1000_RCTL_EN);
2665 } else {
2666 if (netif_carrier_ok(netdev)) {
2667 adapter->link_speed = 0;
2668 adapter->link_duplex = 0;
2669 DPRINTK(LINK, INFO, "NIC Link is Down\n");
2670 netif_carrier_off(netdev);
2671 netif_stop_queue(netdev);
2672 mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
2674 /* 80003ES2LAN packet buffer work-around --
2675 * on a link down event,
2676 * disable receives in the ISR and
2677 * reset the device here in the watchdog
2679 if (adapter->hw.mac_type == e1000_80003es2lan)
2680 /* reset device */
2681 schedule_work(&adapter->reset_task);
2684 e1000_smartspeed(adapter);
2687 e1000_update_stats(adapter);
2689 adapter->hw.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2690 adapter->tpt_old = adapter->stats.tpt;
2691 adapter->hw.collision_delta = adapter->stats.colc - adapter->colc_old;
2692 adapter->colc_old = adapter->stats.colc;
2694 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2695 adapter->gorcl_old = adapter->stats.gorcl;
2696 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2697 adapter->gotcl_old = adapter->stats.gotcl;
2699 e1000_update_adaptive(&adapter->hw);
2701 if (!netif_carrier_ok(netdev)) {
2702 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2703 /* We've lost link, so the controller stops DMA,
2704 * but we've got queued Tx work that's never going
2705 * to get done, so reset controller to flush Tx.
2706 * (Do the reset outside of interrupt context). */
2707 adapter->tx_timeout_count++;
2708 schedule_work(&adapter->reset_task);
2712 /* Cause software interrupt to ensure rx ring is cleaned */
2713 E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
2715 /* Force detection of hung controller every watchdog period */
2716 adapter->detect_tx_hung = TRUE;
2718 /* With 82571 controllers, LAA may be overwritten due to controller
2719 * reset from the other port. Set the appropriate LAA in RAR[0] */
2720 if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present)
2721 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
2723 /* Reset the timer */
2724 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
2727 enum latency_range {
2728 lowest_latency = 0,
2729 low_latency = 1,
2730 bulk_latency = 2,
2731 latency_invalid = 255
2735 * e1000_update_itr - update the dynamic ITR value based on statistics
2736 * Stores a new ITR value based on packets and byte
2737 * counts during the last interrupt. The advantage of per interrupt
2738 * computation is faster updates and more accurate ITR for the current
2739 * traffic pattern. Constants in this function were computed
2740 * based on theoretical maximum wire speed and thresholds were set based
2741 * on testing data as well as attempting to minimize response time
2742 * while increasing bulk throughput.
2743 * This functionality is controlled by the InterruptThrottleRate module
2744 * parameter (see e1000_param.c)
2745 * @adapter: pointer to adapter
2746 * @itr_setting: current adapter->itr
2747 * @packets: the number of packets during this measurement interval
2748 * @bytes: the number of bytes during this measurement interval
2749 */
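/* Worked example (illustrative): 40 packets / 40000 bytes in an
 * interval is 1000 bytes/packet; starting from low_latency that falls
 * through to the (packets > 35) test and returns lowest_latency, while
 * 4 packets / 36000 bytes (9000 bytes/packet jumbo traffic) trips the
 * bytes/packets > 8000 test and returns bulk_latency. */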
2750 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2751 uint16_t itr_setting,
2752 int packets,
2753 int bytes)
2755 unsigned int retval = itr_setting;
2756 struct e1000_hw *hw = &adapter->hw;
2758 if (unlikely(hw->mac_type < e1000_82540))
2759 goto update_itr_done;
2761 if (packets == 0)
2762 goto update_itr_done;
2764 switch (itr_setting) {
2765 case lowest_latency:
2766 /* jumbo frames get bulk treatment */
2767 if (bytes/packets > 8000)
2768 retval = bulk_latency;
2769 else if ((packets < 5) && (bytes > 512))
2770 retval = low_latency;
2771 break;
2772 case low_latency: /* 50 usec aka 20000 ints/s */
2773 if (bytes > 10000) {
2774 /* jumbo frames need bulk latency setting */
2775 if (bytes/packets > 8000)
2776 retval = bulk_latency;
2777 else if ((packets < 10) || ((bytes/packets) > 1200))
2778 retval = bulk_latency;
2779 else if ((packets > 35))
2780 retval = lowest_latency;
2781 } else if (bytes/packets > 2000)
2782 retval = bulk_latency;
2783 else if (packets <= 2 && bytes < 512)
2784 retval = lowest_latency;
2785 break;
2786 case bulk_latency: /* 250 usec aka 4000 ints/s */
2787 if (bytes > 25000) {
2788 if (packets > 35)
2789 retval = low_latency;
2790 } else if (bytes < 6000) {
2791 retval = low_latency;
2793 break;
2796 update_itr_done:
2797 return retval;
2800 static void e1000_set_itr(struct e1000_adapter *adapter)
2802 struct e1000_hw *hw = &adapter->hw;
2803 uint16_t current_itr;
2804 uint32_t new_itr = adapter->itr;
2806 if (unlikely(hw->mac_type < e1000_82540))
2807 return;
2809 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2810 if (unlikely(adapter->link_speed != SPEED_1000)) {
2811 current_itr = 0;
2812 new_itr = 4000;
2813 goto set_itr_now;
2816 adapter->tx_itr = e1000_update_itr(adapter,
2817 adapter->tx_itr,
2818 adapter->total_tx_packets,
2819 adapter->total_tx_bytes);
2820 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2821 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2822 adapter->tx_itr = low_latency;
2824 adapter->rx_itr = e1000_update_itr(adapter,
2825 adapter->rx_itr,
2826 adapter->total_rx_packets,
2827 adapter->total_rx_bytes);
2828 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2829 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2830 adapter->rx_itr = low_latency;
2832 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2834 switch (current_itr) {
2835 /* counts and packets in update_itr are dependent on these numbers */
2836 case lowest_latency:
2837 new_itr = 70000;
2838 break;
2839 case low_latency:
2840 new_itr = 20000; /* aka hwitr = ~200 */
2841 break;
2842 case bulk_latency:
2843 new_itr = 4000;
2844 break;
2845 default:
2846 break;
2849 set_itr_now:
2850 if (new_itr != adapter->itr) {
2851 /* this attempts to bias the interrupt rate towards Bulk
2852 * by adding intermediate steps when interrupt rate is
2853 * increasing */
2854 new_itr = new_itr > adapter->itr ?
2855 min(adapter->itr + (new_itr >> 2), new_itr) :
2856 new_itr;
2857 adapter->itr = new_itr;
2858 E1000_WRITE_REG(hw, ITR, 1000000000 / (new_itr * 256));
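/* e.g. stepping up from adapter->itr = 4000 toward new_itr = 20000:
 * min(4000 + (20000 >> 2), 20000) = 9000, so the rate climbs toward
 * the target over several passes instead of jumping there at once. */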
2861 return;
2864 #define E1000_TX_FLAGS_CSUM 0x00000001
2865 #define E1000_TX_FLAGS_VLAN 0x00000002
2866 #define E1000_TX_FLAGS_TSO 0x00000004
2867 #define E1000_TX_FLAGS_IPV4 0x00000008
2868 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
2869 #define E1000_TX_FLAGS_VLAN_SHIFT 16
2871 static int
2872 e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2873 struct sk_buff *skb)
2875 struct e1000_context_desc *context_desc;
2876 struct e1000_buffer *buffer_info;
2877 unsigned int i;
2878 uint32_t cmd_length = 0;
2879 uint16_t ipcse = 0, tucse, mss;
2880 uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
2881 int err;
2883 if (skb_is_gso(skb)) {
2884 if (skb_header_cloned(skb)) {
2885 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2886 if (err)
2887 return err;
2890 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
2891 mss = skb_shinfo(skb)->gso_size;
2892 if (skb->protocol == htons(ETH_P_IP)) {
2893 skb->nh.iph->tot_len = 0;
2894 skb->nh.iph->check = 0;
2895 skb->h.th->check =
2896 ~csum_tcpudp_magic(skb->nh.iph->saddr,
2897 skb->nh.iph->daddr,
2898 0,
2899 IPPROTO_TCP,
2900 0);
2901 cmd_length = E1000_TXD_CMD_IP;
2902 ipcse = skb->h.raw - skb->data - 1;
2903 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2904 skb->nh.ipv6h->payload_len = 0;
2905 skb->h.th->check =
2906 ~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
2907 &skb->nh.ipv6h->daddr,
2908 0,
2909 IPPROTO_TCP,
2910 0);
2911 ipcse = 0;
2913 ipcss = skb->nh.raw - skb->data;
2914 ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
2915 tucss = skb->h.raw - skb->data;
2916 tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
2917 tucse = 0;
2919 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2920 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2922 i = tx_ring->next_to_use;
2923 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2924 buffer_info = &tx_ring->buffer_info[i];
2926 context_desc->lower_setup.ip_fields.ipcss = ipcss;
2927 context_desc->lower_setup.ip_fields.ipcso = ipcso;
2928 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
2929 context_desc->upper_setup.tcp_fields.tucss = tucss;
2930 context_desc->upper_setup.tcp_fields.tucso = tucso;
2931 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2932 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
2933 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2934 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2936 buffer_info->time_stamp = jiffies;
2937 buffer_info->next_to_watch = i;
2939 if (++i == tx_ring->count) i = 0;
2940 tx_ring->next_to_use = i;
2942 return TRUE;
2944 return FALSE;
2947 static boolean_t
2948 e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2949 struct sk_buff *skb)
2951 struct e1000_context_desc *context_desc;
2952 struct e1000_buffer *buffer_info;
2953 unsigned int i;
2954 uint8_t css;
2956 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
2957 css = skb->h.raw - skb->data;
2959 i = tx_ring->next_to_use;
2960 buffer_info = &tx_ring->buffer_info[i];
2961 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2963 context_desc->lower_setup.ip_config = 0;
2964 context_desc->upper_setup.tcp_fields.tucss = css;
2965 context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
2966 context_desc->upper_setup.tcp_fields.tucse = 0;
2967 context_desc->tcp_seg_setup.data = 0;
2968 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
2970 buffer_info->time_stamp = jiffies;
2971 buffer_info->next_to_watch = i;
2973 if (unlikely(++i == tx_ring->count)) i = 0;
2974 tx_ring->next_to_use = i;
2976 return TRUE;
2979 return FALSE;
2982 #define E1000_MAX_TXD_PWR 12
2983 #define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
2985 static int
2986 e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2987 struct sk_buff *skb, unsigned int first, unsigned int max_per_txd,
2988 unsigned int nr_frags, unsigned int mss)
2990 struct e1000_buffer *buffer_info;
2991 unsigned int len = skb->len;
2992 unsigned int offset = 0, size, count = 0, i;
2993 unsigned int f;
2994 len -= skb->data_len;
2996 i = tx_ring->next_to_use;
2998 while (len) {
2999 buffer_info = &tx_ring->buffer_info[i];
3000 size = min(len, max_per_txd);
3001 /* Workaround for Controller erratum --
3002 * descriptor for non-tso packet in a linear SKB that follows a
3003 * tso gets written back prematurely before the data is fully
3004 * DMA'd to the controller */
3005 if (!skb->data_len && tx_ring->last_tx_tso &&
3006 !skb_is_gso(skb)) {
3007 tx_ring->last_tx_tso = 0;
3008 size -= 4;
3011 /* Workaround for premature desc write-backs
3012 * in TSO mode. Append 4-byte sentinel desc */
3013 if (unlikely(mss && !nr_frags && size == len && size > 8))
3014 size -= 4;
3015 /* work-around for errata 10; it applies
3016 * to all controllers in PCI-X mode.
3017 * The fix is to make sure that the first descriptor of a
3018 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
3020 if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
3021 (size > 2015) && count == 0))
3022 size = 2015;
3024 /* Workaround for potential 82544 hang in PCI-X. Avoid
3025 * terminating buffers within evenly-aligned dwords. */
3026 if (unlikely(adapter->pcix_82544 &&
3027 !((unsigned long)(skb->data + offset + size - 1) & 4) &&
3028 size > 4))
3029 size -= 4;
3031 buffer_info->length = size;
3032 buffer_info->dma =
3033 pci_map_single(adapter->pdev,
3034 skb->data + offset,
3035 size,
3036 PCI_DMA_TODEVICE);
3037 buffer_info->time_stamp = jiffies;
3038 buffer_info->next_to_watch = i;
3040 len -= size;
3041 offset += size;
3042 count++;
3043 if (unlikely(++i == tx_ring->count)) i = 0;
3046 for (f = 0; f < nr_frags; f++) {
3047 struct skb_frag_struct *frag;
3049 frag = &skb_shinfo(skb)->frags[f];
3050 len = frag->size;
3051 offset = frag->page_offset;
3053 while (len) {
3054 buffer_info = &tx_ring->buffer_info[i];
3055 size = min(len, max_per_txd);
3056 /* Workaround for premature desc write-backs
3057 * in TSO mode. Append 4-byte sentinel desc */
3058 if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
3059 size -= 4;
3060 /* Workaround for potential 82544 hang in PCI-X.
3061 * Avoid terminating buffers within evenly-aligned
3062 * dwords. */
3063 if (unlikely(adapter->pcix_82544 &&
3064 !((unsigned long)(frag->page+offset+size-1) & 4) &&
3065 size > 4))
3066 size -= 4;
3068 buffer_info->length = size;
3069 buffer_info->dma =
3070 pci_map_page(adapter->pdev,
3071 frag->page,
3072 offset,
3073 size,
3074 PCI_DMA_TODEVICE);
3075 buffer_info->time_stamp = jiffies;
3076 buffer_info->next_to_watch = i;
3078 len -= size;
3079 offset += size;
3080 count++;
3081 if (unlikely(++i == tx_ring->count)) i = 0;
3085 i = (i == 0) ? tx_ring->count - 1 : i - 1;
3086 tx_ring->buffer_info[i].skb = skb;
3087 tx_ring->buffer_info[first].next_to_watch = i;
3089 return count;
3092 static void
3093 e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
3094 int tx_flags, int count)
3096 struct e1000_tx_desc *tx_desc = NULL;
3097 struct e1000_buffer *buffer_info;
3098 uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
3099 unsigned int i;
3101 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
3102 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
3103 E1000_TXD_CMD_TSE;
3104 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3106 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
3107 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
3110 if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
3111 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3112 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3115 if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
3116 txd_lower |= E1000_TXD_CMD_VLE;
3117 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
3120 i = tx_ring->next_to_use;
3122 while (count--) {
3123 buffer_info = &tx_ring->buffer_info[i];
3124 tx_desc = E1000_TX_DESC(*tx_ring, i);
3125 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3126 tx_desc->lower.data =
3127 cpu_to_le32(txd_lower | buffer_info->length);
3128 tx_desc->upper.data = cpu_to_le32(txd_upper);
3129 if (unlikely(++i == tx_ring->count)) i = 0;
3132 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3134 /* Force memory writes to complete before letting h/w
3135 * know there are new descriptors to fetch. (Only
3136 * applicable for weak-ordered memory model archs,
3137 * such as IA-64). */
3138 wmb();
3140 tx_ring->next_to_use = i;
3141 writel(i, adapter->hw.hw_addr + tx_ring->tdt);
3142 /* we need this if more than one processor can write to our tail
3143 * at a time; it synchronizes IO on IA64/Altix systems */
3144 mmiowb();
3148 * 82547 workaround to avoid controller hang in half-duplex environment.
3149 * The workaround is to avoid queuing a large packet that would span
3150 * the internal Tx FIFO ring boundary by notifying the stack to resend
3151 * the packet at a later time. This gives the Tx FIFO an opportunity to
3152 * flush all packets. When that occurs, we reset the Tx FIFO pointers
3153 * to the beginning of the Tx FIFO.
3156 #define E1000_FIFO_HDR 0x10
3157 #define E1000_82547_PAD_LEN 0x3E0
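/* Worked example (illustrative): a 1514-byte frame becomes
 * skb_fifo_len = 1514 + E1000_FIFO_HDR = 1530, rounded up to 1536.
 * If only fifo_space = 512 bytes remain before the wrap point, then
 * 1536 >= E1000_82547_PAD_LEN (992) + 512 holds, so in half duplex
 * the queue stalls until the FIFO drains and the pointers reset. */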
3159 static int
3160 e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
3162 uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3163 uint32_t skb_fifo_len = skb->len + E1000_FIFO_HDR;
3165 E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR);
3167 if (adapter->link_duplex != HALF_DUPLEX)
3168 goto no_fifo_stall_required;
3170 if (atomic_read(&adapter->tx_fifo_stall))
3171 return 1;
3173 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3174 atomic_set(&adapter->tx_fifo_stall, 1);
3175 return 1;
3178 no_fifo_stall_required:
3179 adapter->tx_fifo_head += skb_fifo_len;
3180 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3181 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3182 return 0;
3185 #define MINIMUM_DHCP_PACKET_SIZE 282
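/* Plausible decomposition of the 282-byte floor: 14 (Ethernet) +
 * 20 (IPv4) + 8 (UDP) + 240 (fixed BOOTP fields plus the 4-byte DHCP
 * magic cookie) = 282, i.e. the smallest frame that still carries a
 * complete DHCP header for the parse below. */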
3186 static int
3187 e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
3189 struct e1000_hw *hw = &adapter->hw;
3190 uint16_t length, offset;
3191 if (vlan_tx_tag_present(skb)) {
3192 if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
3193 ( adapter->hw.mng_cookie.status &
3194 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) )
3195 return 0;
3197 if (skb->len > MINIMUM_DHCP_PACKET_SIZE) {
3198 struct ethhdr *eth = (struct ethhdr *) skb->data;
3199 if ((htons(ETH_P_IP) == eth->h_proto)) {
3200 const struct iphdr *ip =
3201 (struct iphdr *)((uint8_t *)skb->data+14);
3202 if (IPPROTO_UDP == ip->protocol) {
3203 struct udphdr *udp =
3204 (struct udphdr *)((uint8_t *)ip +
3205 (ip->ihl << 2));
3206 if (ntohs(udp->dest) == 67) {
3207 offset = (uint8_t *)udp + 8 - skb->data;
3208 length = skb->len - offset;
3210 return e1000_mng_write_dhcp_info(hw,
3211 (uint8_t *)udp + 8,
3212 length);
3217 return 0;
3220 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3222 struct e1000_adapter *adapter = netdev_priv(netdev);
3223 struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3225 netif_stop_queue(netdev);
3226 /* Herbert's original patch had:
3227 * smp_mb__after_netif_stop_queue();
3228 * but since that doesn't exist yet, just open code it. */
3229 smp_mb();
3231 /* We need to check again in case another CPU has just
3232 * made room available. */
3233 if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3234 return -EBUSY;
3236 /* A reprieve! */
3237 netif_start_queue(netdev);
3238 ++adapter->restart_queue;
3239 return 0;
3242 static int e1000_maybe_stop_tx(struct net_device *netdev,
3243 struct e1000_tx_ring *tx_ring, int size)
3245 if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3246 return 0;
3247 return __e1000_maybe_stop_tx(netdev, size);
3250 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
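/* e.g. with max_txd_pwr = E1000_MAX_TXD_PWR = 12 (4096-byte chunks),
 * TXD_USE_COUNT(9000, 12) = (9000 >> 12) + 1 = 3 descriptors; the
 * "+ 1" makes this a worst-case estimate (an exact 4096-byte buffer
 * still reserves 2). */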
3251 static int
3252 e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3254 struct e1000_adapter *adapter = netdev_priv(netdev);
3255 struct e1000_tx_ring *tx_ring;
3256 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3257 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3258 unsigned int tx_flags = 0;
3259 unsigned int len = skb->len;
3260 unsigned long flags;
3261 unsigned int nr_frags = 0;
3262 unsigned int mss = 0;
3263 int count = 0;
3264 int tso;
3265 unsigned int f;
3266 len -= skb->data_len;
3268 /* This goes back to the question of how to logically map a tx queue
3269 * to a flow. Right now, performance is impacted slightly negatively
3270 * if using multiple tx queues. If the stack breaks away from a
3271 * single qdisc implementation, we can look at this again. */
3272 tx_ring = adapter->tx_ring;
3274 if (unlikely(skb->len <= 0)) {
3275 dev_kfree_skb_any(skb);
3276 return NETDEV_TX_OK;
3279 /* 82571 and newer don't need the workaround that limited descriptor
3280 * length to 4kB */
3281 if (adapter->hw.mac_type >= e1000_82571)
3282 max_per_txd = 8192;
3284 mss = skb_shinfo(skb)->gso_size;
3285 /* The controller does a simple calculation to
3286 * make sure there is enough room in the FIFO before
3287 * initiating the DMA for each buffer. The calc is:
3288 * 4 = ceil(buffer len/mss). To make sure we don't
3289 * overrun the FIFO, adjust the max buffer len if mss
3290 * drops. */
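/* e.g. a standard mss of 1460 on 82571+ (where max_per_txd starts at
 * 8192) yields max_per_txd = min(1460 << 2, 8192) = 5840 -- exactly
 * 4 * mss -- and max_txd_pwr = fls(5840) - 1 = 12, so no single DMA
 * buffer exceeds the 4-segment budget the FIFO check assumes. */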
3291 if (mss) {
3292 uint8_t hdr_len;
3293 max_per_txd = min(mss << 2, max_per_txd);
3294 max_txd_pwr = fls(max_per_txd) - 1;
3296 /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
3297 * points to just header, pull a few bytes of payload from
3298 * frags into skb->data */
3299 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
3300 if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) {
3301 switch (adapter->hw.mac_type) {
3302 unsigned int pull_size;
3303 case e1000_82544:
3304 /* Make sure we have room to chop off 4 bytes,
3305 * and that the end alignment will work out to
3306 * this hardware's requirements
3307 * NOTE: this is a TSO-only workaround;
3308 * if the end byte alignment is not correct, move us
3309 * into the next dword */
3310 if ((unsigned long)(skb->tail - 1) & 4)
3311 break;
3312 /* fall through */
3313 case e1000_82571:
3314 case e1000_82572:
3315 case e1000_82573:
3316 case e1000_ich8lan:
3317 pull_size = min((unsigned int)4, skb->data_len);
3318 if (!__pskb_pull_tail(skb, pull_size)) {
3319 DPRINTK(DRV, ERR,
3320 "__pskb_pull_tail failed.\n");
3321 dev_kfree_skb_any(skb);
3322 return NETDEV_TX_OK;
3324 len = skb->len - skb->data_len;
3325 break;
3326 default:
3327 /* do nothing */
3328 break;
3333 /* reserve a descriptor for the offload context */
3334 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3335 count++;
3336 count++;
3338 /* Controller Erratum workaround */
3339 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3340 count++;
3342 count += TXD_USE_COUNT(len, max_txd_pwr);
3344 if (adapter->pcix_82544)
3345 count++;
3347 /* work-around for errata 10; it applies to all controllers
3348 * in PCI-X mode, so add one more descriptor to the count
3350 if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
3351 (len > 2015)))
3352 count++;
3354 nr_frags = skb_shinfo(skb)->nr_frags;
3355 for (f = 0; f < nr_frags; f++)
3356 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
3357 max_txd_pwr);
3358 if (adapter->pcix_82544)
3359 count += nr_frags;
3362 if (adapter->hw.tx_pkt_filtering &&
3363 (adapter->hw.mac_type == e1000_82573))
3364 e1000_transfer_dhcp_info(adapter, skb);
3366 local_irq_save(flags);
3367 if (!spin_trylock(&tx_ring->tx_lock)) {
3368 /* Collision - tell upper layer to requeue */
3369 local_irq_restore(flags);
3370 return NETDEV_TX_LOCKED;
3373 /* need: count + 2 desc gap to keep tail from touching
3374 * head, otherwise try next time */
3375 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) {
3376 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
3377 return NETDEV_TX_BUSY;
3380 if (unlikely(adapter->hw.mac_type == e1000_82547)) {
3381 if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
3382 netif_stop_queue(netdev);
3383 mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
3384 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
3385 return NETDEV_TX_BUSY;
3389 if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
3390 tx_flags |= E1000_TX_FLAGS_VLAN;
3391 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
3394 first = tx_ring->next_to_use;
3396 tso = e1000_tso(adapter, tx_ring, skb);
3397 if (tso < 0) {
3398 dev_kfree_skb_any(skb);
3399 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
3400 return NETDEV_TX_OK;
3403 if (likely(tso)) {
3404 tx_ring->last_tx_tso = 1;
3405 tx_flags |= E1000_TX_FLAGS_TSO;
3406 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
3407 tx_flags |= E1000_TX_FLAGS_CSUM;
3409 /* The old method was to assume an IPv4 packet by default if TSO was
3410 * enabled. 82571 hardware supports TSO for IPv6 as well, so we can
3411 * no longer make that assumption. */
3412 if (likely(skb->protocol == htons(ETH_P_IP)))
3413 tx_flags |= E1000_TX_FLAGS_IPV4;
3415 e1000_tx_queue(adapter, tx_ring, tx_flags,
3416 e1000_tx_map(adapter, tx_ring, skb, first,
3417 max_per_txd, nr_frags, mss));
3419 netdev->trans_start = jiffies;
3421 /* Make sure there is space in the ring for the next send. */
3422 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
3424 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
3425 return NETDEV_TX_OK;
3429 * e1000_tx_timeout - Respond to a Tx Hang
3430 * @netdev: network interface device structure
3433 static void
3434 e1000_tx_timeout(struct net_device *netdev)
3436 struct e1000_adapter *adapter = netdev_priv(netdev);
3438 /* Do the reset outside of interrupt context */
3439 adapter->tx_timeout_count++;
3440 schedule_work(&adapter->reset_task);
3443 static void
3444 e1000_reset_task(struct work_struct *work)
3446 struct e1000_adapter *adapter =
3447 container_of(work, struct e1000_adapter, reset_task);
3449 e1000_reinit_locked(adapter);
3453 * e1000_get_stats - Get System Network Statistics
3454 * @netdev: network interface device structure
3456 * Returns the address of the device statistics structure.
3457 * The statistics are actually updated from the timer callback.
3460 static struct net_device_stats *
3461 e1000_get_stats(struct net_device *netdev)
3463 struct e1000_adapter *adapter = netdev_priv(netdev);
3465 /* only return the current stats */
3466 return &adapter->net_stats;
3470 * e1000_change_mtu - Change the Maximum Transfer Unit
3471 * @netdev: network interface device structure
3472 * @new_mtu: new value for maximum frame size
3474 * Returns 0 on success, negative on failure
3477 static int
3478 e1000_change_mtu(struct net_device *netdev, int new_mtu)
3480 struct e1000_adapter *adapter = netdev_priv(netdev);
3481 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3482 uint16_t eeprom_data = 0;
3484 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3485 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3486 DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
3487 return -EINVAL;
3490 /* Adapter-specific max frame size limits. */
3491 switch (adapter->hw.mac_type) {
3492 case e1000_undefined ... e1000_82542_rev2_1:
3493 case e1000_ich8lan:
3494 if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
3495 DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
3496 return -EINVAL;
3498 break;
3499 case e1000_82573:
3500 /* Jumbo Frames not supported if:
3501 * - this is not an 82573L device
3502 * - ASPM is enabled in any way (0x1A bits 3:2) */
3503 e1000_read_eeprom(&adapter->hw, EEPROM_INIT_3GIO_3, 1,
3504 &eeprom_data);
3505 if ((adapter->hw.device_id != E1000_DEV_ID_82573L) ||
3506 (eeprom_data & EEPROM_WORD1A_ASPM_MASK)) {
3507 if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
3508 DPRINTK(PROBE, ERR,
3509 "Jumbo Frames not supported.\n");
3510 return -EINVAL;
3512 break;
3514 /* ERT will be enabled later to enable wire speed receives */
3516 /* fall through to get support */
3517 case e1000_82571:
3518 case e1000_82572:
3519 case e1000_80003es2lan:
3520 #define MAX_STD_JUMBO_FRAME_SIZE 9234
3521 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3522 DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
3523 return -EINVAL;
3525 break;
3526 default:
3527 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3528 break;
3531 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3532 * means we reserve 2 more; this pushes us to allocate from the next
3533 * larger slab size,
3534 * i.e. RXBUFFER_2048 --> size-4096 slab */
3536 if (max_frame <= E1000_RXBUFFER_256)
3537 adapter->rx_buffer_len = E1000_RXBUFFER_256;
3538 else if (max_frame <= E1000_RXBUFFER_512)
3539 adapter->rx_buffer_len = E1000_RXBUFFER_512;
3540 else if (max_frame <= E1000_RXBUFFER_1024)
3541 adapter->rx_buffer_len = E1000_RXBUFFER_1024;
3542 else if (max_frame <= E1000_RXBUFFER_2048)
3543 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3544 else if (max_frame <= E1000_RXBUFFER_4096)
3545 adapter->rx_buffer_len = E1000_RXBUFFER_4096;
3546 else if (max_frame <= E1000_RXBUFFER_8192)
3547 adapter->rx_buffer_len = E1000_RXBUFFER_8192;
3548 else if (max_frame <= E1000_RXBUFFER_16384)
3549 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3551 /* adjust allocation if LPE protects us, and we aren't using SBP */
3552 if (!adapter->hw.tbi_compatibility_on &&
3553 ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) ||
3554 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3555 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3557 netdev->mtu = new_mtu;
3558 adapter->hw.max_frame_size = max_frame;
3560 if (netif_running(netdev))
3561 e1000_reinit_locked(adapter);
3563 return 0;
3567 * e1000_update_stats - Update the board statistics counters
3568 * @adapter: board private structure
3571 void
3572 e1000_update_stats(struct e1000_adapter *adapter)
3574 struct e1000_hw *hw = &adapter->hw;
3575 struct pci_dev *pdev = adapter->pdev;
3576 unsigned long flags;
3577 uint16_t phy_tmp;
3579 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3582 * Prevent stats update while adapter is being reset, or if the pci
3583 * connection is down.
3585 if (adapter->link_speed == 0)
3586 return;
3587 if (pci_channel_offline(pdev))
3588 return;
3590 spin_lock_irqsave(&adapter->stats_lock, flags);
3592 /* these counters are modified from e1000_adjust_tbi_stats,
3593 * called from the interrupt context, so they must only
3594 * be written while holding adapter->stats_lock
3597 adapter->stats.crcerrs += E1000_READ_REG(hw, CRCERRS);
3598 adapter->stats.gprc += E1000_READ_REG(hw, GPRC);
3599 adapter->stats.gorcl += E1000_READ_REG(hw, GORCL);
3600 adapter->stats.gorch += E1000_READ_REG(hw, GORCH);
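/* GORCL/GORCH (like TORL/TORH below) are the low and high halves of a
 * 64-bit octet counter; the statistics registers clear on read, so
 * each half is accumulated into the software copy separately. */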
3601 adapter->stats.bprc += E1000_READ_REG(hw, BPRC);
3602 adapter->stats.mprc += E1000_READ_REG(hw, MPRC);
3603 adapter->stats.roc += E1000_READ_REG(hw, ROC);
3605 if (adapter->hw.mac_type != e1000_ich8lan) {
3606 adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
3607 adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
3608 adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
3609 adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
3610 adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
3611 adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
3614 adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
3615 adapter->stats.mpc += E1000_READ_REG(hw, MPC);
3616 adapter->stats.scc += E1000_READ_REG(hw, SCC);
3617 adapter->stats.ecol += E1000_READ_REG(hw, ECOL);
3618 adapter->stats.mcc += E1000_READ_REG(hw, MCC);
3619 adapter->stats.latecol += E1000_READ_REG(hw, LATECOL);
3620 adapter->stats.dc += E1000_READ_REG(hw, DC);
3621 adapter->stats.sec += E1000_READ_REG(hw, SEC);
3622 adapter->stats.rlec += E1000_READ_REG(hw, RLEC);
3623 adapter->stats.xonrxc += E1000_READ_REG(hw, XONRXC);
3624 adapter->stats.xontxc += E1000_READ_REG(hw, XONTXC);
3625 adapter->stats.xoffrxc += E1000_READ_REG(hw, XOFFRXC);
3626 adapter->stats.xofftxc += E1000_READ_REG(hw, XOFFTXC);
3627 adapter->stats.fcruc += E1000_READ_REG(hw, FCRUC);
3628 adapter->stats.gptc += E1000_READ_REG(hw, GPTC);
3629 adapter->stats.gotcl += E1000_READ_REG(hw, GOTCL);
3630 adapter->stats.gotch += E1000_READ_REG(hw, GOTCH);
3631 adapter->stats.rnbc += E1000_READ_REG(hw, RNBC);
3632 adapter->stats.ruc += E1000_READ_REG(hw, RUC);
3633 adapter->stats.rfc += E1000_READ_REG(hw, RFC);
3634 adapter->stats.rjc += E1000_READ_REG(hw, RJC);
3635 adapter->stats.torl += E1000_READ_REG(hw, TORL);
3636 adapter->stats.torh += E1000_READ_REG(hw, TORH);
3637 adapter->stats.totl += E1000_READ_REG(hw, TOTL);
3638 adapter->stats.toth += E1000_READ_REG(hw, TOTH);
3639 adapter->stats.tpr += E1000_READ_REG(hw, TPR);
3641 if (adapter->hw.mac_type != e1000_ich8lan) {
3642 adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
3643 adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
3644 adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
3645 adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
3646 adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
3647 adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
3650 adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
3651 adapter->stats.bptc += E1000_READ_REG(hw, BPTC);
3653 /* used for adaptive IFS */
3655 hw->tx_packet_delta = E1000_READ_REG(hw, TPT);
3656 adapter->stats.tpt += hw->tx_packet_delta;
3657 hw->collision_delta = E1000_READ_REG(hw, COLC);
3658 adapter->stats.colc += hw->collision_delta;
3660 if (hw->mac_type >= e1000_82543) {
3661 adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC);
3662 adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC);
3663 adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS);
3664 adapter->stats.cexterr += E1000_READ_REG(hw, CEXTERR);
3665 adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
3666 adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
3668 if (hw->mac_type > e1000_82547_rev_2) {
3669 adapter->stats.iac += E1000_READ_REG(hw, IAC);
3670 adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
3672 if (adapter->hw.mac_type != e1000_ich8lan) {
3673 adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
3674 adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
3675 adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
3676 adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC);
3677 adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
3678 adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
3679 adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
3683 /* Fill out the OS statistics structure */
3684 adapter->net_stats.rx_packets = adapter->stats.gprc;
3685 adapter->net_stats.tx_packets = adapter->stats.gptc;
3686 adapter->net_stats.rx_bytes = adapter->stats.gorcl;
3687 adapter->net_stats.tx_bytes = adapter->stats.gotcl;
3688 adapter->net_stats.multicast = adapter->stats.mprc;
3689 adapter->net_stats.collisions = adapter->stats.colc;
3691 /* Rx Errors */
3693 /* RLEC on some newer hardware can be incorrect so build
3694 * our own version based on RUC and ROC */
3695 adapter->net_stats.rx_errors = adapter->stats.rxerrc +
3696 adapter->stats.crcerrs + adapter->stats.algnerrc +
3697 adapter->stats.ruc + adapter->stats.roc +
3698 adapter->stats.cexterr;
3699 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3700 adapter->net_stats.rx_length_errors = adapter->stats.rlerrc;
3701 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
3702 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
3703 adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
3705 /* Tx Errors */
3706 adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3707 adapter->net_stats.tx_errors = adapter->stats.txerrc;
3708 adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
3709 adapter->net_stats.tx_window_errors = adapter->stats.latecol;
3710 adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
3711 if (adapter->hw.bad_tx_carr_stats_fd &&
3712 adapter->link_duplex == FULL_DUPLEX) {
3713 adapter->net_stats.tx_carrier_errors = 0;
3714 adapter->stats.tncrs = 0;
3717 /* Tx Dropped needs to be maintained elsewhere */
3719 /* Phy Stats */
3720 if (hw->media_type == e1000_media_type_copper) {
3721 if ((adapter->link_speed == SPEED_1000) &&
3722 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3723 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3724 adapter->phy_stats.idle_errors += phy_tmp;
3727 if ((hw->mac_type <= e1000_82546) &&
3728 (hw->phy_type == e1000_phy_m88) &&
3729 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3730 adapter->phy_stats.receive_errors += phy_tmp;
3733 /* Management Stats */
3734 if (adapter->hw.has_smbus) {
3735 adapter->stats.mgptc += E1000_READ_REG(hw, MGTPTC);
3736 adapter->stats.mgprc += E1000_READ_REG(hw, MGTPRC);
3737 adapter->stats.mgpdc += E1000_READ_REG(hw, MGTPDC);
3740 spin_unlock_irqrestore(&adapter->stats_lock, flags);
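
/* A minimal illustration (not a helper this driver defines) of how the
 * 32-bit low/high register pairs read above -- e.g. TORL/TORH,
 * GOTCL/GOTCH -- fold into a single 64-bit octet count: */
static inline uint64_t example_stat64(uint32_t lo, uint32_t hi)
{
	return ((uint64_t)hi << 32) | lo;
}
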
3742 #ifdef CONFIG_PCI_MSI
3745 * e1000_intr_msi - Interrupt Handler
3746 * @irq: interrupt number
3747 * @data: pointer to a network interface device structure
3750 static irqreturn_t
3751 e1000_intr_msi(int irq, void *data)
3753 struct net_device *netdev = data;
3754 struct e1000_adapter *adapter = netdev_priv(netdev);
3755 struct e1000_hw *hw = &adapter->hw;
3756 #ifndef CONFIG_E1000_NAPI
3757 int i;
3758 #endif
3759 uint32_t icr = E1000_READ_REG(hw, ICR);
3761 #ifdef CONFIG_E1000_NAPI
3762 /* read ICR disables interrupts using IAM, so keep up with our
3763 * enable/disable accounting */
3764 atomic_inc(&adapter->irq_sem);
3765 #endif
3766 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
3767 hw->get_link_status = 1;
3768 /* 80003ES2LAN workaround-- For packet buffer work-around on
3769 * link down event; disable receives here in the ISR and reset
3770 * adapter in watchdog */
3771 if (netif_carrier_ok(netdev) &&
3772 (adapter->hw.mac_type == e1000_80003es2lan)) {
3773 /* disable receives */
3774 uint32_t rctl = E1000_READ_REG(hw, RCTL);
3775 E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
3777 /* guard against interrupt when we're going down */
3778 if (!test_bit(__E1000_DOWN, &adapter->flags))
3779 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3782 #ifdef CONFIG_E1000_NAPI
3783 if (likely(netif_rx_schedule_prep(netdev))) {
3784 adapter->total_tx_bytes = 0;
3785 adapter->total_tx_packets = 0;
3786 adapter->total_rx_bytes = 0;
3787 adapter->total_rx_packets = 0;
3788 __netif_rx_schedule(netdev);
3789 } else
3790 e1000_irq_enable(adapter);
3791 #else
3792 adapter->total_tx_bytes = 0;
3793 adapter->total_rx_bytes = 0;
3794 adapter->total_tx_packets = 0;
3795 adapter->total_rx_packets = 0;
3797 for (i = 0; i < E1000_MAX_INTR; i++)
3798 if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
3799 !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
3800 break;
3802 if (likely(adapter->itr_setting & 3))
3803 e1000_set_itr(adapter);
3804 #endif
3806 return IRQ_HANDLED;
3808 #endif
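
/* The irq_sem manipulation above keeps mask/unmask balanced: every
 * atomic_inc() must be matched before interrupts are truly re-enabled.
 * A simplified sketch of the unmask side (the real e1000_irq_enable is
 * defined earlier in this file; IMS_ENABLE_MASK is the driver's usual
 * interrupt set): */
static void example_irq_unmask(struct e1000_adapter *adapter)
{
	/* only unmask once every disable has been balanced out */
	if (atomic_dec_and_test(&adapter->irq_sem)) {
		E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
		E1000_WRITE_FLUSH(&adapter->hw);
	}
}
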
3811 * e1000_intr - Interrupt Handler
3812 * @irq: interrupt number
3813 * @data: pointer to a network interface device structure
3816 static irqreturn_t
3817 e1000_intr(int irq, void *data)
3819 struct net_device *netdev = data;
3820 struct e1000_adapter *adapter = netdev_priv(netdev);
3821 struct e1000_hw *hw = &adapter->hw;
3822 uint32_t rctl, icr = E1000_READ_REG(hw, ICR);
3823 #ifndef CONFIG_E1000_NAPI
3824 int i;
3825 #endif
3826 if (unlikely(!icr))
3827 return IRQ_NONE; /* Not our interrupt */
3829 #ifdef CONFIG_E1000_NAPI
3830 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
3831 * not set, then the adapter didn't send an interrupt */
3832 if (unlikely(hw->mac_type >= e1000_82571 &&
3833 !(icr & E1000_ICR_INT_ASSERTED)))
3834 return IRQ_NONE;
3836 /* Interrupt Auto-Mask...upon reading ICR,
3837 * interrupts are masked. No need for the
3838 * IMC write, but it does mean we should
3839 * account for it ASAP. */
3840 if (likely(hw->mac_type >= e1000_82571))
3841 atomic_inc(&adapter->irq_sem);
3842 #endif
3844 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3845 hw->get_link_status = 1;
3846 /* 80003ES2LAN workaround--
3847 * For packet buffer work-around on link down event;
3848 * disable receives here in the ISR and
3849 * reset adapter in watchdog
3851 if (netif_carrier_ok(netdev) &&
3852 (adapter->hw.mac_type == e1000_80003es2lan)) {
3853 /* disable receives */
3854 rctl = E1000_READ_REG(hw, RCTL);
3855 E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
3857 /* guard against interrupt when we're going down */
3858 if (!test_bit(__E1000_DOWN, &adapter->flags))
3859 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3862 #ifdef CONFIG_E1000_NAPI
3863 if (unlikely(hw->mac_type < e1000_82571)) {
3864 /* disable interrupts, without the synchronize_irq bit */
3865 atomic_inc(&adapter->irq_sem);
3866 E1000_WRITE_REG(hw, IMC, ~0);
3867 E1000_WRITE_FLUSH(hw);
3869 if (likely(netif_rx_schedule_prep(netdev))) {
3870 adapter->total_tx_bytes = 0;
3871 adapter->total_tx_packets = 0;
3872 adapter->total_rx_bytes = 0;
3873 adapter->total_rx_packets = 0;
3874 __netif_rx_schedule(netdev);
3875 } else
3876 /* this really should not happen! if it does it is basically a
3877 * bug, but not a hard error, so enable ints and continue */
3878 e1000_irq_enable(adapter);
3879 #else
3880 /* Writing IMC and IMS is needed for 82547.
3881 * Due to Hub Link bus being occupied, an interrupt
3882 * de-assertion message is not able to be sent.
3883 * When an interrupt assertion message is generated later,
3884 * two messages are re-ordered and sent out.
3885 * That causes APIC to think 82547 is in de-assertion
3886 * state, while 82547 is in assertion state, resulting
3887 * in deadlock. Writing IMC forces 82547 into
3888 * de-assertion state.
3890 if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) {
3891 atomic_inc(&adapter->irq_sem);
3892 E1000_WRITE_REG(hw, IMC, ~0);
3895 adapter->total_tx_bytes = 0;
3896 adapter->total_rx_bytes = 0;
3897 adapter->total_tx_packets = 0;
3898 adapter->total_rx_packets = 0;
3900 for (i = 0; i < E1000_MAX_INTR; i++)
3901 if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
3902 !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
3903 break;
3905 if (likely(adapter->itr_setting & 3))
3906 e1000_set_itr(adapter);
3908 if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
3909 e1000_irq_enable(adapter);
3911 #endif
3912 return IRQ_HANDLED;
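
/* ICR is read-to-clear: the single E1000_READ_REG(hw, ICR) at the top
 * of each handler fetches all pending cause bits and clears them in
 * hardware, so every later test runs against the saved copy.
 * Illustrative helper for the link-event test used above: */
static inline int example_is_link_event(uint32_t icr)
{
	return (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) != 0;
}
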
3915 #ifdef CONFIG_E1000_NAPI
3917 * e1000_clean - NAPI Rx polling callback
3918 * @poll_dev: net_device being polled
3921 static int
3922 e1000_clean(struct net_device *poll_dev, int *budget)
3924 struct e1000_adapter *adapter;
3925 int work_to_do = min(*budget, poll_dev->quota);
3926 int tx_cleaned = 0, work_done = 0;
3928 /* Must NOT use netdev_priv macro here. */
3929 adapter = poll_dev->priv;
3931 /* Keep link state information with original netdev */
3932 if (!netif_carrier_ok(poll_dev))
3933 goto quit_polling;
3935 /* e1000_clean is called per-cpu. This lock protects
3936 * tx_ring[0] from being cleaned by multiple cpus
3937 * simultaneously. A failure obtaining the lock means
3938 * tx_ring[0] is currently being cleaned anyway. */
3939 if (spin_trylock(&adapter->tx_queue_lock)) {
3940 tx_cleaned = e1000_clean_tx_irq(adapter,
3941 &adapter->tx_ring[0]);
3942 spin_unlock(&adapter->tx_queue_lock);
3945 adapter->clean_rx(adapter, &adapter->rx_ring[0],
3946 &work_done, work_to_do);
3948 *budget -= work_done;
3949 poll_dev->quota -= work_done;
3951 /* If no Tx and not enough Rx work done, exit the polling mode */
3952 if ((tx_cleaned && (work_done < work_to_do)) ||
3953 !netif_running(poll_dev)) {
3954 quit_polling:
3955 if (likely(adapter->itr_setting & 3))
3956 e1000_set_itr(adapter);
3957 netif_rx_complete(poll_dev);
3958 e1000_irq_enable(adapter);
3959 return 0;
3962 return 1;
3965 #endif
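
/* Old-style NAPI bookkeeping, as performed above: work done is charged
 * against both the global softirq budget and the per-device quota, and
 * the return value (0 == done, 1 == more to do) tells the core whether
 * to poll again. Sketch of the accounting only: */
static inline void example_napi_account(struct net_device *poll_dev,
					int *budget, int work_done)
{
	*budget -= work_done;
	poll_dev->quota -= work_done;
}
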
3967 * e1000_clean_tx_irq - Reclaim resources after transmit completes
3968 * @adapter: board private structure
3971 static boolean_t
3972 e1000_clean_tx_irq(struct e1000_adapter *adapter,
3973 struct e1000_tx_ring *tx_ring)
3975 struct net_device *netdev = adapter->netdev;
3976 struct e1000_tx_desc *tx_desc, *eop_desc;
3977 struct e1000_buffer *buffer_info;
3978 unsigned int i, eop;
3979 #ifdef CONFIG_E1000_NAPI
3980 unsigned int count = 0;
3981 #endif
3982 boolean_t cleaned = TRUE;
3983 unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3985 i = tx_ring->next_to_clean;
3986 eop = tx_ring->buffer_info[i].next_to_watch;
3987 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3989 while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
3990 for (cleaned = FALSE; !cleaned; ) {
3991 tx_desc = E1000_TX_DESC(*tx_ring, i);
3992 buffer_info = &tx_ring->buffer_info[i];
3993 cleaned = (i == eop);
3995 if (cleaned) {
3996 struct sk_buff *skb = buffer_info->skb;
3997 unsigned int segs, bytecount;
3998 segs = skb_shinfo(skb)->gso_segs ?: 1;
3999 /* multiply data chunks by size of headers */
4000 bytecount = ((segs - 1) * skb_headlen(skb)) +
4001 skb->len;
4002 total_tx_packets += segs;
4003 total_tx_bytes += bytecount;
4005 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
4006 tx_desc->upper.data = 0;
4008 if (unlikely(++i == tx_ring->count)) i = 0;
4011 eop = tx_ring->buffer_info[i].next_to_watch;
4012 eop_desc = E1000_TX_DESC(*tx_ring, eop);
4013 #ifdef CONFIG_E1000_NAPI
4014 #define E1000_TX_WEIGHT 64
4015 /* weight of a sort for tx, to avoid endless transmit cleanup */
4016 if (count++ == E1000_TX_WEIGHT) {
4017 cleaned = FALSE;
4018 break;
4020 #endif
4023 tx_ring->next_to_clean = i;
4025 #define TX_WAKE_THRESHOLD 32
4026 if (unlikely(cleaned && netif_carrier_ok(netdev) &&
4027 E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
4028 /* Make sure that anybody stopping the queue after this
4029 * sees the new next_to_clean.
4031 smp_mb();
4032 if (netif_queue_stopped(netdev)) {
4033 netif_wake_queue(netdev);
4034 ++adapter->restart_queue;
4038 if (adapter->detect_tx_hung) {
4039 /* Detect a transmit hang in hardware; this serializes the
4040 * check with the clearing of time_stamp and the movement of i */
4041 adapter->detect_tx_hung = FALSE;
4042 if (tx_ring->buffer_info[eop].dma &&
4043 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
4044 (adapter->tx_timeout_factor * HZ))
4045 && !(E1000_READ_REG(&adapter->hw, STATUS) &
4046 E1000_STATUS_TXOFF)) {
4048 /* detected Tx unit hang */
4049 DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
4050 " Tx Queue <%lu>\n"
4051 " TDH <%x>\n"
4052 " TDT <%x>\n"
4053 " next_to_use <%x>\n"
4054 " next_to_clean <%x>\n"
4055 "buffer_info[next_to_clean]\n"
4056 " time_stamp <%lx>\n"
4057 " next_to_watch <%x>\n"
4058 " jiffies <%lx>\n"
4059 " next_to_watch.status <%x>\n",
4060 (unsigned long)(tx_ring - adapter->tx_ring),
4062 readl(adapter->hw.hw_addr + tx_ring->tdh),
4063 readl(adapter->hw.hw_addr + tx_ring->tdt),
4064 tx_ring->next_to_use,
4065 tx_ring->next_to_clean,
4066 tx_ring->buffer_info[eop].time_stamp,
4067 eop,
4068 jiffies,
4069 eop_desc->upper.fields.status);
4070 netif_stop_queue(netdev);
4073 adapter->total_tx_bytes += total_tx_bytes;
4074 adapter->total_tx_packets += total_tx_packets;
4075 return cleaned;
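
/* The byte accounting above includes the headers that TSO replicates
 * into every segment: a gso skb with 3 segments, 54 bytes of headers
 * and skb->len == 3000 is counted as (3 - 1) * 54 + 3000 = 3108 bytes.
 * Sketch mirroring that arithmetic (illustrative only): */
static inline unsigned int example_tx_bytecount(unsigned int segs,
						unsigned int hdr_len,
						unsigned int len)
{
	return (segs - 1) * hdr_len + len;
}
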
4079 * e1000_rx_checksum - Receive Checksum Offload for 82543
4080 * @adapter: board private structure
4081 * @status_err: receive descriptor status and error fields
4082 * @csum: receive descriptor csum field
4083 * @sk_buff: socket buffer with received data
4086 static void
4087 e1000_rx_checksum(struct e1000_adapter *adapter,
4088 uint32_t status_err, uint32_t csum,
4089 struct sk_buff *skb)
4091 uint16_t status = (uint16_t)status_err;
4092 uint8_t errors = (uint8_t)(status_err >> 24);
4093 skb->ip_summed = CHECKSUM_NONE;
4095 /* 82543 or newer only */
4096 if (unlikely(adapter->hw.mac_type < e1000_82543)) return;
4097 /* Ignore Checksum bit is set */
4098 if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
4099 /* TCP/UDP checksum error bit is set */
4100 if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
4101 /* let the stack verify checksum errors */
4102 adapter->hw_csum_err++;
4103 return;
4105 /* TCP/UDP Checksum has not been calculated */
4106 if (adapter->hw.mac_type <= e1000_82547_rev_2) {
4107 if (!(status & E1000_RXD_STAT_TCPCS))
4108 return;
4109 } else {
4110 if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
4111 return;
4113 /* It must be a TCP or UDP packet with a valid checksum */
4114 if (likely(status & E1000_RXD_STAT_TCPCS)) {
4115 /* TCP checksum is good */
4116 skb->ip_summed = CHECKSUM_UNNECESSARY;
4117 } else if (adapter->hw.mac_type > e1000_82547_rev_2) {
4118 /* IP fragment with UDP payload */
4119 /* Hardware complements the payload checksum, so we undo it
4120 * and then put the value in host order for further stack use.
4122 csum = ntohl(csum ^ 0xFFFF);
4123 skb->csum = csum;
4124 skb->ip_summed = CHECKSUM_COMPLETE;
4126 adapter->hw_csum_good++;
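
/* For UDP payloads in IP fragments the hardware reports the one's
 * complement of the payload sum, so the XOR with 0xFFFF above undoes
 * the complement before the byte-order conversion: a reported 0xABCD
 * becomes 0x5432. Illustrative form of that step: */
static inline uint32_t example_undo_csum_complement(uint16_t hw_csum)
{
	return ntohl((uint32_t)hw_csum ^ 0xFFFF);
}
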
4130 * e1000_clean_rx_irq - Send received data up the network stack; legacy
4131 * @adapter: board private structure
4134 static boolean_t
4135 #ifdef CONFIG_E1000_NAPI
4136 e1000_clean_rx_irq(struct e1000_adapter *adapter,
4137 struct e1000_rx_ring *rx_ring,
4138 int *work_done, int work_to_do)
4139 #else
4140 e1000_clean_rx_irq(struct e1000_adapter *adapter,
4141 struct e1000_rx_ring *rx_ring)
4142 #endif
4144 struct net_device *netdev = adapter->netdev;
4145 struct pci_dev *pdev = adapter->pdev;
4146 struct e1000_rx_desc *rx_desc, *next_rxd;
4147 struct e1000_buffer *buffer_info, *next_buffer;
4148 unsigned long flags;
4149 uint32_t length;
4150 uint8_t last_byte;
4151 unsigned int i;
4152 int cleaned_count = 0;
4153 boolean_t cleaned = FALSE;
4154 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4156 i = rx_ring->next_to_clean;
4157 rx_desc = E1000_RX_DESC(*rx_ring, i);
4158 buffer_info = &rx_ring->buffer_info[i];
4160 while (rx_desc->status & E1000_RXD_STAT_DD) {
4161 struct sk_buff *skb;
4162 u8 status;
4164 #ifdef CONFIG_E1000_NAPI
4165 if (*work_done >= work_to_do)
4166 break;
4167 (*work_done)++;
4168 #endif
4169 status = rx_desc->status;
4170 skb = buffer_info->skb;
4171 buffer_info->skb = NULL;
4173 prefetch(skb->data - NET_IP_ALIGN);
4175 if (++i == rx_ring->count) i = 0;
4176 next_rxd = E1000_RX_DESC(*rx_ring, i);
4177 prefetch(next_rxd);
4179 next_buffer = &rx_ring->buffer_info[i];
4181 cleaned = TRUE;
4182 cleaned_count++;
4183 pci_unmap_single(pdev,
4184 buffer_info->dma,
4185 buffer_info->length,
4186 PCI_DMA_FROMDEVICE);
4188 length = le16_to_cpu(rx_desc->length);
4190 if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
4191 /* All receives must fit into a single buffer */
4192 E1000_DBG("%s: Receive packet consumed multiple"
4193 " buffers\n", netdev->name);
4194 /* recycle */
4195 buffer_info->skb = skb;
4196 goto next_desc;
4199 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4200 last_byte = *(skb->data + length - 1);
4201 if (TBI_ACCEPT(&adapter->hw, status,
4202 rx_desc->errors, length, last_byte)) {
4203 spin_lock_irqsave(&adapter->stats_lock, flags);
4204 e1000_tbi_adjust_stats(&adapter->hw,
4205 &adapter->stats,
4206 length, skb->data);
4207 spin_unlock_irqrestore(&adapter->stats_lock,
4208 flags);
4209 length--;
4210 } else {
4211 /* recycle */
4212 buffer_info->skb = skb;
4213 goto next_desc;
4217 /* adjust length to remove Ethernet CRC, this must be
4218 * done after the TBI_ACCEPT workaround above */
4219 length -= 4;
4221 /* probably a little skewed due to removing CRC */
4222 total_rx_bytes += length;
4223 total_rx_packets++;
4225 /* copybreak: copying small packets into a freshly allocated skb
4226 * should improve performance when large amounts of reassembly
4227 * are being done in the stack */
4228 if (length < copybreak) {
4229 struct sk_buff *new_skb =
4230 netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
4231 if (new_skb) {
4232 skb_reserve(new_skb, NET_IP_ALIGN);
4233 memcpy(new_skb->data - NET_IP_ALIGN,
4234 skb->data - NET_IP_ALIGN,
4235 length + NET_IP_ALIGN);
4236 /* save the skb in buffer_info as good */
4237 buffer_info->skb = skb;
4238 skb = new_skb;
4240 /* else just continue with the old one */
4242 /* end copybreak code */
4243 skb_put(skb, length);
4245 /* Receive Checksum Offload */
4246 e1000_rx_checksum(adapter,
4247 (uint32_t)(status) |
4248 ((uint32_t)(rx_desc->errors) << 24),
4249 le16_to_cpu(rx_desc->csum), skb);
4251 skb->protocol = eth_type_trans(skb, netdev);
4252 #ifdef CONFIG_E1000_NAPI
4253 if (unlikely(adapter->vlgrp &&
4254 (status & E1000_RXD_STAT_VP))) {
4255 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
4256 le16_to_cpu(rx_desc->special) &
4257 E1000_RXD_SPC_VLAN_MASK);
4258 } else {
4259 netif_receive_skb(skb);
4261 #else /* CONFIG_E1000_NAPI */
4262 if (unlikely(adapter->vlgrp &&
4263 (status & E1000_RXD_STAT_VP))) {
4264 vlan_hwaccel_rx(skb, adapter->vlgrp,
4265 le16_to_cpu(rx_desc->special) &
4266 E1000_RXD_SPC_VLAN_MASK);
4267 } else {
4268 netif_rx(skb);
4270 #endif /* CONFIG_E1000_NAPI */
4271 netdev->last_rx = jiffies;
4273 next_desc:
4274 rx_desc->status = 0;
4276 /* return some buffers to hardware, one at a time is too slow */
4277 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4278 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4279 cleaned_count = 0;
4282 /* use prefetched values */
4283 rx_desc = next_rxd;
4284 buffer_info = next_buffer;
4286 rx_ring->next_to_clean = i;
4288 cleaned_count = E1000_DESC_UNUSED(rx_ring);
4289 if (cleaned_count)
4290 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4292 adapter->total_rx_packets += total_rx_packets;
4293 adapter->total_rx_bytes += total_rx_bytes;
4294 return cleaned;
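
/* The copybreak path above spends one memcpy to move a short frame
 * into a freshly allocated small skb, so the full-size receive buffer
 * can be recycled in place. Condensed sketch of that exchange
 * (illustrative; the real code also preserves the NET_IP_ALIGN slack
 * in the copy): */
static struct sk_buff *example_copybreak(struct net_device *netdev,
					 struct sk_buff *skb,
					 unsigned int length)
{
	struct sk_buff *new_skb =
		netdev_alloc_skb(netdev, length + NET_IP_ALIGN);

	if (!new_skb)
		return skb;		/* just keep the big buffer */
	skb_reserve(new_skb, NET_IP_ALIGN);
	memcpy(new_skb->data, skb->data, length);
	return new_skb;			/* caller recycles the old skb */
}
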
4298 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
4299 * @adapter: board private structure
4302 static boolean_t
4303 #ifdef CONFIG_E1000_NAPI
4304 e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
4305 struct e1000_rx_ring *rx_ring,
4306 int *work_done, int work_to_do)
4307 #else
4308 e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
4309 struct e1000_rx_ring *rx_ring)
4310 #endif
4312 union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
4313 struct net_device *netdev = adapter->netdev;
4314 struct pci_dev *pdev = adapter->pdev;
4315 struct e1000_buffer *buffer_info, *next_buffer;
4316 struct e1000_ps_page *ps_page;
4317 struct e1000_ps_page_dma *ps_page_dma;
4318 struct sk_buff *skb;
4319 unsigned int i, j;
4320 uint32_t length, staterr;
4321 int cleaned_count = 0;
4322 boolean_t cleaned = FALSE;
4323 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4325 i = rx_ring->next_to_clean;
4326 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
4327 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
4328 buffer_info = &rx_ring->buffer_info[i];
4330 while (staterr & E1000_RXD_STAT_DD) {
4331 ps_page = &rx_ring->ps_page[i];
4332 ps_page_dma = &rx_ring->ps_page_dma[i];
4333 #ifdef CONFIG_E1000_NAPI
4334 if (unlikely(*work_done >= work_to_do))
4335 break;
4336 (*work_done)++;
4337 #endif
4338 skb = buffer_info->skb;
4340 /* in the packet split case this is header only */
4341 prefetch(skb->data - NET_IP_ALIGN);
4343 if (++i == rx_ring->count) i = 0;
4344 next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
4345 prefetch(next_rxd);
4347 next_buffer = &rx_ring->buffer_info[i];
4349 cleaned = TRUE;
4350 cleaned_count++;
4351 pci_unmap_single(pdev, buffer_info->dma,
4352 buffer_info->length,
4353 PCI_DMA_FROMDEVICE);
4355 if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
4356 E1000_DBG("%s: Packet Split buffers didn't pick up"
4357 " the full packet\n", netdev->name);
4358 dev_kfree_skb_irq(skb);
4359 goto next_desc;
4362 if (unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
4363 dev_kfree_skb_irq(skb);
4364 goto next_desc;
4367 length = le16_to_cpu(rx_desc->wb.middle.length0);
4369 if (unlikely(!length)) {
4370 E1000_DBG("%s: Last part of the packet spanning"
4371 " multiple descriptors\n", netdev->name);
4372 dev_kfree_skb_irq(skb);
4373 goto next_desc;
4376 /* Good Receive */
4377 skb_put(skb, length);
4380 /* this looks ugly, but it seems compiler issues make it
4381 more efficient than reusing j */
4382 int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
4384 /* page alloc/put takes too long and affects small packet
4385 * throughput, so unsplit small packets and save the alloc/put*/
4386 if (l1 && (l1 <= copybreak) && ((length + l1) <= adapter->rx_ps_bsize0)) {
4387 u8 *vaddr;
4388 /* there is no documentation about how to call
4389 * kmap_atomic, so we can't hold the mapping
4390 * very long */
4391 pci_dma_sync_single_for_cpu(pdev,
4392 ps_page_dma->ps_page_dma[0],
4393 PAGE_SIZE,
4394 PCI_DMA_FROMDEVICE);
4395 vaddr = kmap_atomic(ps_page->ps_page[0],
4396 KM_SKB_DATA_SOFTIRQ);
4397 memcpy(skb->tail, vaddr, l1);
4398 kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
4399 pci_dma_sync_single_for_device(pdev,
4400 ps_page_dma->ps_page_dma[0],
4401 PAGE_SIZE, PCI_DMA_FROMDEVICE);
4402 /* remove the CRC */
4403 l1 -= 4;
4404 skb_put(skb, l1);
4405 goto copydone;
4406 } /* if */
4409 for (j = 0; j < adapter->rx_ps_pages; j++) {
4410 if (!(length = le16_to_cpu(rx_desc->wb.upper.length[j])))
4411 break;
4412 pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
4413 PAGE_SIZE, PCI_DMA_FROMDEVICE);
4414 ps_page_dma->ps_page_dma[j] = 0;
4415 skb_fill_page_desc(skb, j, ps_page->ps_page[j], 0,
4416 length);
4417 ps_page->ps_page[j] = NULL;
4418 skb->len += length;
4419 skb->data_len += length;
4420 skb->truesize += length;
4423 /* strip the Ethernet CRC; the problem is we're using pages now,
4424 * so this whole operation can get a little CPU intensive */
4425 pskb_trim(skb, skb->len - 4);
4427 copydone:
4428 total_rx_bytes += skb->len;
4429 total_rx_packets++;
4431 e1000_rx_checksum(adapter, staterr,
4432 le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
4433 skb->protocol = eth_type_trans(skb, netdev);
4435 if (likely(rx_desc->wb.upper.header_status &
4436 cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)))
4437 adapter->rx_hdr_split++;
4438 #ifdef CONFIG_E1000_NAPI
4439 if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
4440 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
4441 le16_to_cpu(rx_desc->wb.middle.vlan) &
4442 E1000_RXD_SPC_VLAN_MASK);
4443 } else {
4444 netif_receive_skb(skb);
4446 #else /* CONFIG_E1000_NAPI */
4447 if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
4448 vlan_hwaccel_rx(skb, adapter->vlgrp,
4449 le16_to_cpu(rx_desc->wb.middle.vlan) &
4450 E1000_RXD_SPC_VLAN_MASK);
4451 } else {
4452 netif_rx(skb);
4454 #endif /* CONFIG_E1000_NAPI */
4455 netdev->last_rx = jiffies;
4457 next_desc:
4458 rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
4459 buffer_info->skb = NULL;
4461 /* return some buffers to hardware, one at a time is too slow */
4462 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4463 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4464 cleaned_count = 0;
4467 /* use prefetched values */
4468 rx_desc = next_rxd;
4469 buffer_info = next_buffer;
4471 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
4473 rx_ring->next_to_clean = i;
4475 cleaned_count = E1000_DESC_UNUSED(rx_ring);
4476 if (cleaned_count)
4477 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4479 adapter->total_rx_packets += total_rx_packets;
4480 adapter->total_rx_bytes += total_rx_bytes;
4481 return cleaned;
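
/* In the packet-split path only the header lands in skb->data; each
 * payload page is attached as a fragment, and all three skb length
 * fields must grow by the fragment size, exactly as done above.
 * Sketch (illustrative): */
static void example_attach_rx_page(struct sk_buff *skb, struct page *page,
				   unsigned int len, int frag)
{
	skb_fill_page_desc(skb, frag, page, 0, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += len;
}
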
4485 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4486 * @adapter: address of board private structure
4489 static void
4490 e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4491 struct e1000_rx_ring *rx_ring,
4492 int cleaned_count)
4494 struct net_device *netdev = adapter->netdev;
4495 struct pci_dev *pdev = adapter->pdev;
4496 struct e1000_rx_desc *rx_desc;
4497 struct e1000_buffer *buffer_info;
4498 struct sk_buff *skb;
4499 unsigned int i;
4500 unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
4502 i = rx_ring->next_to_use;
4503 buffer_info = &rx_ring->buffer_info[i];
4505 while (cleaned_count--) {
4506 skb = buffer_info->skb;
4507 if (skb) {
4508 skb_trim(skb, 0);
4509 goto map_skb;
4512 skb = netdev_alloc_skb(netdev, bufsz);
4513 if (unlikely(!skb)) {
4514 /* Better luck next round */
4515 adapter->alloc_rx_buff_failed++;
4516 break;
4519 /* Fix for errata 23, can't cross 64kB boundary */
4520 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4521 struct sk_buff *oldskb = skb;
4522 DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
4523 "at %p\n", bufsz, skb->data);
4524 /* Try again, without freeing the previous */
4525 skb = netdev_alloc_skb(netdev, bufsz);
4526 /* Failed allocation, critical failure */
4527 if (!skb) {
4528 dev_kfree_skb(oldskb);
4529 break;
4532 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4533 /* give up */
4534 dev_kfree_skb(skb);
4535 dev_kfree_skb(oldskb);
4536 break; /* while !buffer_info->skb */
4539 /* Use new allocation */
4540 dev_kfree_skb(oldskb);
4542 /* Make buffer alignment 2 beyond a 16 byte boundary
4543 * this will result in a 16 byte aligned IP header after
4544 * the 14 byte MAC header is removed
4546 skb_reserve(skb, NET_IP_ALIGN);
4548 buffer_info->skb = skb;
4549 buffer_info->length = adapter->rx_buffer_len;
4550 map_skb:
4551 buffer_info->dma = pci_map_single(pdev,
4552 skb->data,
4553 adapter->rx_buffer_len,
4554 PCI_DMA_FROMDEVICE);
4556 /* Fix for errata 23, can't cross 64kB boundary */
4557 if (!e1000_check_64k_bound(adapter,
4558 (void *)(unsigned long)buffer_info->dma,
4559 adapter->rx_buffer_len)) {
4560 DPRINTK(RX_ERR, ERR,
4561 "dma align check failed: %u bytes at %p\n",
4562 adapter->rx_buffer_len,
4563 (void *)(unsigned long)buffer_info->dma);
4564 dev_kfree_skb(skb);
4565 buffer_info->skb = NULL;
4567 pci_unmap_single(pdev, buffer_info->dma,
4568 adapter->rx_buffer_len,
4569 PCI_DMA_FROMDEVICE);
4571 break; /* while !buffer_info->skb */
4573 rx_desc = E1000_RX_DESC(*rx_ring, i);
4574 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4576 if (unlikely(++i == rx_ring->count))
4577 i = 0;
4578 buffer_info = &rx_ring->buffer_info[i];
4581 if (likely(rx_ring->next_to_use != i)) {
4582 rx_ring->next_to_use = i;
4583 if (unlikely(i-- == 0))
4584 i = (rx_ring->count - 1);
4586 /* Force memory writes to complete before letting h/w
4587 * know there are new descriptors to fetch. (Only
4588 * applicable for weak-ordered memory model archs,
4589 * such as IA-64). */
4590 wmb();
4591 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
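
/* Errata 23: a receive buffer may not cross a 64 kB boundary. A
 * plausible shape for the e1000_check_64k_bound() test used above
 * (the real helper is defined elsewhere in this file): */
static inline int example_within_64k(void *start, unsigned long len)
{
	unsigned long begin = (unsigned long)start;
	unsigned long end = begin + len - 1;

	/* first and last byte must share all address bits above bit 15 */
	return ((begin ^ end) >> 16) == 0;
}
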
4596 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
4597 * @adapter: address of board private structure
4600 static void
4601 e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
4602 struct e1000_rx_ring *rx_ring,
4603 int cleaned_count)
4605 struct net_device *netdev = adapter->netdev;
4606 struct pci_dev *pdev = adapter->pdev;
4607 union e1000_rx_desc_packet_split *rx_desc;
4608 struct e1000_buffer *buffer_info;
4609 struct e1000_ps_page *ps_page;
4610 struct e1000_ps_page_dma *ps_page_dma;
4611 struct sk_buff *skb;
4612 unsigned int i, j;
4614 i = rx_ring->next_to_use;
4615 buffer_info = &rx_ring->buffer_info[i];
4616 ps_page = &rx_ring->ps_page[i];
4617 ps_page_dma = &rx_ring->ps_page_dma[i];
4619 while (cleaned_count--) {
4620 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
4622 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
4623 if (j < adapter->rx_ps_pages) {
4624 if (likely(!ps_page->ps_page[j])) {
4625 ps_page->ps_page[j] =
4626 alloc_page(GFP_ATOMIC);
4627 if (unlikely(!ps_page->ps_page[j])) {
4628 adapter->alloc_rx_buff_failed++;
4629 goto no_buffers;
4631 ps_page_dma->ps_page_dma[j] =
4632 pci_map_page(pdev,
4633 ps_page->ps_page[j],
4634 0, PAGE_SIZE,
4635 PCI_DMA_FROMDEVICE);
4637 /* Refresh the desc even if buffer_addrs didn't
4638 * change because each write-back erases
4639 * this info.
4641 rx_desc->read.buffer_addr[j+1] =
4642 cpu_to_le64(ps_page_dma->ps_page_dma[j]);
4643 } else
4644 rx_desc->read.buffer_addr[j+1] = ~0;
4647 skb = netdev_alloc_skb(netdev,
4648 adapter->rx_ps_bsize0 + NET_IP_ALIGN);
4650 if (unlikely(!skb)) {
4651 adapter->alloc_rx_buff_failed++;
4652 break;
4655 /* Make buffer alignment 2 beyond a 16 byte boundary
4656 * this will result in a 16 byte aligned IP header after
4657 * the 14 byte MAC header is removed
4659 skb_reserve(skb, NET_IP_ALIGN);
4661 buffer_info->skb = skb;
4662 buffer_info->length = adapter->rx_ps_bsize0;
4663 buffer_info->dma = pci_map_single(pdev, skb->data,
4664 adapter->rx_ps_bsize0,
4665 PCI_DMA_FROMDEVICE);
4667 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
4669 if (unlikely(++i == rx_ring->count)) i = 0;
4670 buffer_info = &rx_ring->buffer_info[i];
4671 ps_page = &rx_ring->ps_page[i];
4672 ps_page_dma = &rx_ring->ps_page_dma[i];
4675 no_buffers:
4676 if (likely(rx_ring->next_to_use != i)) {
4677 rx_ring->next_to_use = i;
4678 if (unlikely(i-- == 0)) i = (rx_ring->count - 1);
4680 /* Force memory writes to complete before letting h/w
4681 * know there are new descriptors to fetch. (Only
4682 * applicable for weak-ordered memory model archs,
4683 * such as IA-64). */
4684 wmb();
4685 /* Hardware increments by 16 bytes, but packet split
4686 * descriptors are 32 bytes...so we increment tail
4687 * twice as much.
4689 writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt);
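
/* Packet-split descriptors are 32 bytes, but the hardware tail
 * register advances in units of the 16-byte legacy descriptor, hence
 * the i << 1 above: ring index 5 is written as tail value 10.
 * Illustrative form: */
static inline uint32_t example_ps_tail_value(unsigned int ring_idx)
{
	return ring_idx << 1;
}
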
4694 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4695 * @adapter: board private structure
4698 static void
4699 e1000_smartspeed(struct e1000_adapter *adapter)
4701 uint16_t phy_status;
4702 uint16_t phy_ctrl;
4704 if ((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
4705 !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
4706 return;
4708 if (adapter->smartspeed == 0) {
4709 /* If Master/Slave config fault is asserted twice,
4710 * we assume back-to-back */
4711 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
4712 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
4713 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
4714 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
4715 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
4716 if (phy_ctrl & CR_1000T_MS_ENABLE) {
4717 phy_ctrl &= ~CR_1000T_MS_ENABLE;
4718 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
4719 phy_ctrl);
4720 adapter->smartspeed++;
4721 if (!e1000_phy_setup_autoneg(&adapter->hw) &&
4722 !e1000_read_phy_reg(&adapter->hw, PHY_CTRL,
4723 &phy_ctrl)) {
4724 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4725 MII_CR_RESTART_AUTO_NEG);
4726 e1000_write_phy_reg(&adapter->hw, PHY_CTRL,
4727 phy_ctrl);
4730 return;
4731 } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4732 /* If still no link, perhaps using 2/3 pair cable */
4733 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
4734 phy_ctrl |= CR_1000T_MS_ENABLE;
4735 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
4736 if (!e1000_phy_setup_autoneg(&adapter->hw) &&
4737 !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
4738 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4739 MII_CR_RESTART_AUTO_NEG);
4740 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_ctrl);
4743 /* Restart process after E1000_SMARTSPEED_MAX iterations */
4744 if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4745 adapter->smartspeed = 0;
4749 * e1000_ioctl - handle device-specific ioctl calls
4750 * @netdev: network interface device structure
4751 * @ifreq: pointer to the ioctl request
4752 * @cmd: ioctl command
4755 static int
4756 e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4758 switch (cmd) {
4759 case SIOCGMIIPHY:
4760 case SIOCGMIIREG:
4761 case SIOCSMIIREG:
4762 return e1000_mii_ioctl(netdev, ifr, cmd);
4763 default:
4764 return -EOPNOTSUPP;
4769 * e1000_mii_ioctl - handle MII register ioctl calls
4770 * @netdev: network interface device structure
4771 * @ifreq: pointer to the ioctl request
4772 * @cmd: ioctl command
4775 static int
4776 e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4778 struct e1000_adapter *adapter = netdev_priv(netdev);
4779 struct mii_ioctl_data *data = if_mii(ifr);
4780 int retval;
4781 uint16_t mii_reg;
4782 uint16_t spddplx;
4783 unsigned long flags;
4785 if (adapter->hw.media_type != e1000_media_type_copper)
4786 return -EOPNOTSUPP;
4788 switch (cmd) {
4789 case SIOCGMIIPHY:
4790 data->phy_id = adapter->hw.phy_addr;
4791 break;
4792 case SIOCGMIIREG:
4793 if (!capable(CAP_NET_ADMIN))
4794 return -EPERM;
4795 spin_lock_irqsave(&adapter->stats_lock, flags);
4796 if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
4797 &data->val_out)) {
4798 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4799 return -EIO;
4801 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4802 break;
4803 case SIOCSMIIREG:
4804 if (!capable(CAP_NET_ADMIN))
4805 return -EPERM;
4806 if (data->reg_num & ~(0x1F))
4807 return -EFAULT;
4808 mii_reg = data->val_in;
4809 spin_lock_irqsave(&adapter->stats_lock, flags);
4810 if (e1000_write_phy_reg(&adapter->hw, data->reg_num,
4811 mii_reg)) {
4812 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4813 return -EIO;
4815 if (adapter->hw.media_type == e1000_media_type_copper) {
4816 switch (data->reg_num) {
4817 case PHY_CTRL:
4818 if (mii_reg & MII_CR_POWER_DOWN)
4819 break;
4820 if (mii_reg & MII_CR_AUTO_NEG_EN) {
4821 adapter->hw.autoneg = 1;
4822 adapter->hw.autoneg_advertised = 0x2F;
4823 } else {
4824 if (mii_reg & 0x40)
4825 spddplx = SPEED_1000;
4826 else if (mii_reg & 0x2000)
4827 spddplx = SPEED_100;
4828 else
4829 spddplx = SPEED_10;
4830 spddplx += (mii_reg & 0x100)
4831 ? DUPLEX_FULL :
4832 DUPLEX_HALF;
4833 retval = e1000_set_spd_dplx(adapter,
4834 spddplx);
4835 if (retval) {
4836 spin_unlock_irqrestore(
4837 &adapter->stats_lock,
4838 flags);
4839 return retval;
4842 if (netif_running(adapter->netdev))
4843 e1000_reinit_locked(adapter);
4844 else
4845 e1000_reset(adapter);
4846 break;
4847 case M88E1000_PHY_SPEC_CTRL:
4848 case M88E1000_EXT_PHY_SPEC_CTRL:
4849 if (e1000_phy_reset(&adapter->hw)) {
4850 spin_unlock_irqrestore(
4851 &adapter->stats_lock, flags);
4852 return -EIO;
4854 break;
4856 } else {
4857 switch (data->reg_num) {
4858 case PHY_CTRL:
4859 if (mii_reg & MII_CR_POWER_DOWN)
4860 break;
4861 if (netif_running(adapter->netdev))
4862 e1000_reinit_locked(adapter);
4863 else
4864 e1000_reset(adapter);
4865 break;
4868 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4869 break;
4870 default:
4871 return -EOPNOTSUPP;
4873 return E1000_SUCCESS;
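
/* The bare masks tested above are standard MII BMCR bits: 0x40
 * selects 1000 Mb/s, 0x2000 selects 100 Mb/s, and 0x100 means full
 * duplex; e.g. a written control word of 0x2100 decodes to
 * SPEED_100 + DUPLEX_FULL. Sketch of the decode (illustrative): */
static uint16_t example_decode_spddplx(uint16_t mii_reg)
{
	uint16_t spddplx;

	if (mii_reg & 0x40)
		spddplx = SPEED_1000;
	else if (mii_reg & 0x2000)
		spddplx = SPEED_100;
	else
		spddplx = SPEED_10;
	return spddplx + ((mii_reg & 0x100) ? DUPLEX_FULL : DUPLEX_HALF);
}
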
4876 void
4877 e1000_pci_set_mwi(struct e1000_hw *hw)
4879 struct e1000_adapter *adapter = hw->back;
4880 int ret_val = pci_set_mwi(adapter->pdev);
4882 if (ret_val)
4883 DPRINTK(PROBE, ERR, "Error in setting MWI\n");
4886 void
4887 e1000_pci_clear_mwi(struct e1000_hw *hw)
4889 struct e1000_adapter *adapter = hw->back;
4891 pci_clear_mwi(adapter->pdev);
4894 void
4895 e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
4897 struct e1000_adapter *adapter = hw->back;
4899 pci_read_config_word(adapter->pdev, reg, value);
4902 void
4903 e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
4905 struct e1000_adapter *adapter = hw->back;
4907 pci_write_config_word(adapter->pdev, reg, *value);
4910 int32_t
4911 e1000_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
4913 struct e1000_adapter *adapter = hw->back;
4914 uint16_t cap_offset;
4916 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
4917 if (!cap_offset)
4918 return -E1000_ERR_CONFIG;
4920 pci_read_config_word(adapter->pdev, cap_offset + reg, value);
4922 return E1000_SUCCESS;
4925 void
4926 e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
4928 outl(value, port);
4931 static void
4932 e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
4934 struct e1000_adapter *adapter = netdev_priv(netdev);
4935 uint32_t ctrl, rctl;
4937 e1000_irq_disable(adapter);
4938 adapter->vlgrp = grp;
4940 if (grp) {
4941 /* enable VLAN tag insert/strip */
4942 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
4943 ctrl |= E1000_CTRL_VME;
4944 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
4946 if (adapter->hw.mac_type != e1000_ich8lan) {
4947 /* enable VLAN receive filtering */
4948 rctl = E1000_READ_REG(&adapter->hw, RCTL);
4949 rctl |= E1000_RCTL_VFE;
4950 rctl &= ~E1000_RCTL_CFIEN;
4951 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
4952 e1000_update_mng_vlan(adapter);
4954 } else {
4955 /* disable VLAN tag insert/strip */
4956 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
4957 ctrl &= ~E1000_CTRL_VME;
4958 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
4960 if (adapter->hw.mac_type != e1000_ich8lan) {
4961 /* disable VLAN filtering */
4962 rctl = E1000_READ_REG(&adapter->hw, RCTL);
4963 rctl &= ~E1000_RCTL_VFE;
4964 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
4965 if (adapter->mng_vlan_id !=
4966 (uint16_t)E1000_MNG_VLAN_NONE) {
4967 e1000_vlan_rx_kill_vid(netdev,
4968 adapter->mng_vlan_id);
4969 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
4974 e1000_irq_enable(adapter);
4977 static void
4978 e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
4980 struct e1000_adapter *adapter = netdev_priv(netdev);
4981 uint32_t vfta, index;
4983 if ((adapter->hw.mng_cookie.status &
4984 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4985 (vid == adapter->mng_vlan_id))
4986 return;
4987 /* add VID to filter table */
4988 index = (vid >> 5) & 0x7F;
4989 vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
4990 vfta |= (1 << (vid & 0x1F));
4991 e1000_write_vfta(&adapter->hw, index, vfta);
4994 static void
4995 e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
4997 struct e1000_adapter *adapter = netdev_priv(netdev);
4998 uint32_t vfta, index;
5000 e1000_irq_disable(adapter);
5002 if (adapter->vlgrp)
5003 adapter->vlgrp->vlan_devices[vid] = NULL;
5005 e1000_irq_enable(adapter);
5007 if ((adapter->hw.mng_cookie.status &
5008 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
5009 (vid == adapter->mng_vlan_id)) {
5010 /* release control to f/w */
5011 e1000_release_hw_control(adapter);
5012 return;
5015 /* remove VID from filter table */
5016 index = (vid >> 5) & 0x7F;
5017 vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
5018 vfta &= ~(1 << (vid & 0x1F));
5019 e1000_write_vfta(&adapter->hw, index, vfta);
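
/* The VLAN filter table is 128 32-bit words, so a 12-bit VLAN id
 * selects word (vid >> 5) & 0x7F and bit vid & 0x1F within it:
 * vid 100 lands in word 3, bit 4. Illustrative decomposition of the
 * index math used above: */
static inline void example_vfta_slot(uint16_t vid, uint32_t *index,
				     uint32_t *mask)
{
	*index = (vid >> 5) & 0x7F;
	*mask = 1 << (vid & 0x1F);
}
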
5022 static void
5023 e1000_restore_vlan(struct e1000_adapter *adapter)
5025 e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
5027 if (adapter->vlgrp) {
5028 uint16_t vid;
5029 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
5030 if (!adapter->vlgrp->vlan_devices[vid])
5031 continue;
5032 e1000_vlan_rx_add_vid(adapter->netdev, vid);
5038 e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
5040 adapter->hw.autoneg = 0;
5042 /* Fiber NICs only allow 1000 Mb/s full duplex */
5043 if ((adapter->hw.media_type == e1000_media_type_fiber) &&
5044 spddplx != (SPEED_1000 + DUPLEX_FULL)) {
5045 DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
5046 return -EINVAL;
5049 switch (spddplx) {
5050 case SPEED_10 + DUPLEX_HALF:
5051 adapter->hw.forced_speed_duplex = e1000_10_half;
5052 break;
5053 case SPEED_10 + DUPLEX_FULL:
5054 adapter->hw.forced_speed_duplex = e1000_10_full;
5055 break;
5056 case SPEED_100 + DUPLEX_HALF:
5057 adapter->hw.forced_speed_duplex = e1000_100_half;
5058 break;
5059 case SPEED_100 + DUPLEX_FULL:
5060 adapter->hw.forced_speed_duplex = e1000_100_full;
5061 break;
5062 case SPEED_1000 + DUPLEX_FULL:
5063 adapter->hw.autoneg = 1;
5064 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
5065 break;
5066 case SPEED_1000 + DUPLEX_HALF: /* not supported */
5067 default:
5068 DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
5069 return -EINVAL;
5071 return 0;
5074 static int
5075 e1000_suspend(struct pci_dev *pdev, pm_message_t state)
5077 struct net_device *netdev = pci_get_drvdata(pdev);
5078 struct e1000_adapter *adapter = netdev_priv(netdev);
5079 uint32_t ctrl, ctrl_ext, rctl, status;
5080 uint32_t wufc = adapter->wol;
5081 #ifdef CONFIG_PM
5082 int retval = 0;
5083 #endif
5085 netif_device_detach(netdev);
5087 if (netif_running(netdev)) {
5088 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
5089 e1000_down(adapter);
5092 #ifdef CONFIG_PM
5093 retval = pci_save_state(pdev);
5094 if (retval)
5095 return retval;
5096 #endif
5098 status = E1000_READ_REG(&adapter->hw, STATUS);
5099 if (status & E1000_STATUS_LU)
5100 wufc &= ~E1000_WUFC_LNKC;
5102 if (wufc) {
5103 e1000_setup_rctl(adapter);
5104 e1000_set_multi(netdev);
5106 /* turn on all-multi mode if wake on multicast is enabled */
5107 if (wufc & E1000_WUFC_MC) {
5108 rctl = E1000_READ_REG(&adapter->hw, RCTL);
5109 rctl |= E1000_RCTL_MPE;
5110 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
5113 if (adapter->hw.mac_type >= e1000_82540) {
5114 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
5115 /* advertise wake from D3Cold */
5116 #define E1000_CTRL_ADVD3WUC 0x00100000
5117 /* phy power management enable */
5118 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5119 ctrl |= E1000_CTRL_ADVD3WUC |
5120 E1000_CTRL_EN_PHY_PWR_MGMT;
5121 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
5124 if (adapter->hw.media_type == e1000_media_type_fiber ||
5125 adapter->hw.media_type == e1000_media_type_internal_serdes) {
5126 /* keep the laser running in D3 */
5127 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
5128 ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
5129 E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext);
5132 /* Allow time for pending master requests to run */
5133 e1000_disable_pciex_master(&adapter->hw);
5135 E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
5136 E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
5137 pci_enable_wake(pdev, PCI_D3hot, 1);
5138 pci_enable_wake(pdev, PCI_D3cold, 1);
5139 } else {
5140 E1000_WRITE_REG(&adapter->hw, WUC, 0);
5141 E1000_WRITE_REG(&adapter->hw, WUFC, 0);
5142 pci_enable_wake(pdev, PCI_D3hot, 0);
5143 pci_enable_wake(pdev, PCI_D3cold, 0);
5146 e1000_release_manageability(adapter);
5148 /* make sure adapter isn't asleep if manageability is enabled */
5149 if (adapter->en_mng_pt) {
5150 pci_enable_wake(pdev, PCI_D3hot, 1);
5151 pci_enable_wake(pdev, PCI_D3cold, 1);
5154 if (adapter->hw.phy_type == e1000_phy_igp_3)
5155 e1000_phy_powerdown_workaround(&adapter->hw);
5157 if (netif_running(netdev))
5158 e1000_free_irq(adapter);
5160 /* Release control of h/w to f/w. If f/w is AMT enabled, this
5161 * would have already happened in close and is redundant. */
5162 e1000_release_hw_control(adapter);
5164 pci_disable_device(pdev);
5166 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5168 return 0;
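
/* As above: when the link is up at suspend time, the link-change wake
 * bit is masked out of the wake-up filter control value before it is
 * programmed into WUFC. Sketch of that filtering (illustrative): */
static inline uint32_t example_effective_wufc(uint32_t wufc,
					      uint32_t status)
{
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;
	return wufc;
}
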
5171 #ifdef CONFIG_PM
5172 static int
5173 e1000_resume(struct pci_dev *pdev)
5175 struct net_device *netdev = pci_get_drvdata(pdev);
5176 struct e1000_adapter *adapter = netdev_priv(netdev);
5177 uint32_t err;
5179 pci_set_power_state(pdev, PCI_D0);
5180 pci_restore_state(pdev);
5181 if ((err = pci_enable_device(pdev))) {
5182 printk(KERN_ERR "e1000: Cannot enable PCI device from suspend\n");
5183 return err;
5185 pci_set_master(pdev);
5187 pci_enable_wake(pdev, PCI_D3hot, 0);
5188 pci_enable_wake(pdev, PCI_D3cold, 0);
5190 if (netif_running(netdev) && (err = e1000_request_irq(adapter)))
5191 return err;
5193 e1000_power_up_phy(adapter);
5194 e1000_reset(adapter);
5195 E1000_WRITE_REG(&adapter->hw, WUS, ~0);
5197 e1000_init_manageability(adapter);
5199 if (netif_running(netdev))
5200 e1000_up(adapter);
5202 netif_device_attach(netdev);
5204 /* If the controller is 82573 and f/w is AMT, do not set
5205 * DRV_LOAD until the interface is up. For all other cases,
5206 * let the f/w know that the h/w is now under the control
5207 * of the driver. */
5208 if (adapter->hw.mac_type != e1000_82573 ||
5209 !e1000_check_mng_mode(&adapter->hw))
5210 e1000_get_hw_control(adapter);
5212 return 0;
5214 #endif
5216 static void e1000_shutdown(struct pci_dev *pdev)
5218 e1000_suspend(pdev, PMSG_SUSPEND);
5221 #ifdef CONFIG_NET_POLL_CONTROLLER
5223 * Polling 'interrupt' - used by things like netconsole to send skbs
5224 * without having to re-enable interrupts. It's not called while
5225 * the interrupt routine is executing.
5227 static void
5228 e1000_netpoll(struct net_device *netdev)
5230 struct e1000_adapter *adapter = netdev_priv(netdev);
5232 disable_irq(adapter->pdev->irq);
5233 e1000_intr(adapter->pdev->irq, netdev);
5234 e1000_clean_tx_irq(adapter, adapter->tx_ring);
5235 #ifndef CONFIG_E1000_NAPI
5236 adapter->clean_rx(adapter, adapter->rx_ring);
5237 #endif
5238 enable_irq(adapter->pdev->irq);
5240 #endif
5243 * e1000_io_error_detected - called when PCI error is detected
5244 * @pdev: Pointer to PCI device
5245 * @state: The current pci connection state
5247 * This function is called after a PCI bus error affecting
5248 * this device has been detected.
5250 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5252 struct net_device *netdev = pci_get_drvdata(pdev);
5253 struct e1000_adapter *adapter = netdev->priv;
5255 netif_device_detach(netdev);
5257 if (netif_running(netdev))
5258 e1000_down(adapter);
5259 pci_disable_device(pdev);
5261 /* Request a slot reset. */
5262 return PCI_ERS_RESULT_NEED_RESET;
5266 * e1000_io_slot_reset - called after the pci bus has been reset.
5267 * @pdev: Pointer to PCI device
5269 * Restart the card from scratch, as if from a cold-boot. Implementation
5270 * resembles the first-half of the e1000_resume routine.
5272 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5274 struct net_device *netdev = pci_get_drvdata(pdev);
5275 struct e1000_adapter *adapter = netdev->priv;
5277 if (pci_enable_device(pdev)) {
5278 printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n");
5279 return PCI_ERS_RESULT_DISCONNECT;
5281 pci_set_master(pdev);
5283 pci_enable_wake(pdev, PCI_D3hot, 0);
5284 pci_enable_wake(pdev, PCI_D3cold, 0);
5286 e1000_reset(adapter);
5287 E1000_WRITE_REG(&adapter->hw, WUS, ~0);
5289 return PCI_ERS_RESULT_RECOVERED;
5293 * e1000_io_resume - called when traffic can start flowing again.
5294 * @pdev: Pointer to PCI device
5296 * This callback is called when the error recovery driver tells us that
5297 * it's OK to resume normal operation. Implementation resembles the
5298 * second-half of the e1000_resume routine.
5300 static void e1000_io_resume(struct pci_dev *pdev)
5302 struct net_device *netdev = pci_get_drvdata(pdev);
5303 struct e1000_adapter *adapter = netdev->priv;
5305 e1000_init_manageability(adapter);
5307 if (netif_running(netdev)) {
5308 if (e1000_up(adapter)) {
5309 printk(KERN_ERR "e1000: can't bring device back up after reset\n");
5310 return;
5314 netif_device_attach(netdev);
5316 /* If the controller is 82573 and f/w is AMT, do not set
5317 * DRV_LOAD until the interface is up. For all other cases,
5318 * let the f/w know that the h/w is now under the control
5319 * of the driver. */
5320 if (adapter->hw.mac_type != e1000_82573 ||
5321 !e1000_check_mng_mode(&adapter->hw))
5322 e1000_get_hw_control(adapter);
5326 /* e1000_main.c */