drivers/net/e100.c
1 /*******************************************************************************
3 Intel PRO/100 Linux driver
4 Copyright(c) 1999 - 2006 Intel Corporation.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *******************************************************************************/
30 * e100.c: Intel(R) PRO/100 ethernet driver
32 * (Re)written 2003 by scott.feldman@intel.com. Based loosely on
33 * original e100 driver, but better described as a munging of
34 * e100, e1000, eepro100, tg3, 8139cp, and other drivers.
36 * References:
37 * Intel 8255x 10/100 Mbps Ethernet Controller Family,
38 * Open Source Software Developers Manual,
39 * http://sourceforge.net/projects/e1000
42 * Theory of Operation
44 * I. General
46 * The driver supports the Intel(R) 10/100 Mbps PCI Fast Ethernet
47 * controller family, which includes the 82557, 82558, 82559, 82550,
48 * 82551, and 82562 devices. 82558 and greater controllers
49 * integrate the Intel 82555 PHY. The controllers are used in
50 * server and client network interface cards, as well as in
51 * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
52 * configurations. 8255x supports a 32-bit linear addressing
53 * mode and operates at 33MHz PCI clock rate.
55 * II. Driver Operation
57 * Memory-mapped mode is used exclusively to access the device's
58 * shared-memory structure, the Control/Status Registers (CSR). All
59 * setup, configuration, and control of the device, including queuing
60 * of Tx, Rx, and configuration commands is through the CSR.
61 * cmd_lock serializes accesses to the CSR command register. cb_lock
62 * protects the shared Command Block List (CBL).
64 * 8255x is highly MII-compliant and all access to the PHY goes
65 * through the Management Data Interface (MDI). Consequently, the
66 * driver leverages the mii.c library shared with other MII-compliant
67 * devices.
69 * Big- and Little-Endian byte order as well as 32- and 64-bit
70 * archs are supported. Weak-ordered memory and non-cache-coherent
71 * archs are supported.
73 * III. Transmit
75 * A Tx skb is mapped and hangs off of a TCB. TCBs are linked
76 * together in a fixed-size ring (CBL) thus forming the flexible mode
77 * memory structure. A TCB marked with the suspend-bit indicates
78 * the end of the ring. The last TCB processed suspends the
79 * controller, and the controller can be restarted by issuing a CU
80 * resume command to continue from the suspend point, or a CU start
81 * command to start at a given position in the ring.
83 * Non-Tx commands (config, multicast setup, etc) are linked
84 * into the CBL ring along with Tx commands. The common structure
85 * used for both Tx and non-Tx commands is the Command Block (CB).
87 * cb_to_use is the next CB to use for queuing a command; cb_to_clean
88 * is the next CB to check for completion; cb_to_send is the first
89 * CB to start on in case of a previous failure to resume. CB clean
90 * up happens in interrupt context in response to a CU interrupt.
91 * cbs_avail keeps track of number of free CB resources available.
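*
* As an illustrative snapshot (not literal driver state): walking the
* ring, cb_to_clean trails cb_to_send, which trails cb_to_use. CBs
* between cb_to_clean and cb_to_send have already been handed to the CU,
* CBs between cb_to_send and cb_to_use are queued but not yet submitted
* (this gap only exists after a failed resume), and the remaining CBs
* are free and counted by cbs_avail.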
93 * Hardware padding of short packets to minimum packet size is
94 * enabled. 82557 pads with 7Eh, while the later controllers pad
95 * with 00h.
97 * IV. Receive
99 * The Receive Frame Area (RFA) comprises a ring of Receive Frame
100 * Descriptors (RFD) + data buffer, thus forming the simplified mode
101 * memory structure. Rx skbs are allocated to contain both the RFD
102 * and the data buffer, but the RFD is pulled off before the skb is
103 * indicated. The data buffer is aligned such that encapsulated
104 * protocol headers are u32-aligned. Since the RFD is part of the
105 * mapped shared memory, and completion status is contained within
106 * the RFD, the RFD must be dma_sync'ed to maintain a consistent
107 * view from software and hardware.
109 * Under typical operation, the receive unit (RU) is started once,
110 * and the controller happily fills RFDs as frames arrive. If
111 * replacement RFDs cannot be allocated, or the RU goes non-active,
112 * the RU must be restarted. Frame arrival generates an interrupt,
113 * and Rx indication and re-allocation happen in the same context,
114 * therefore no locking is required. A software-generated interrupt
115 * is generated from the watchdog to recover from a failed allocation
116 * scenario where all Rx resources have been indicated and none re-
117 * placed.
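*
* As a sketch of the RFA maintenance (see e100_rx_alloc_skb below):
* each freshly allocated RFD is copied from blank_rfd with the EL
* (end-of-list) bit set, and linking it onto the ring clears the EL
* bit of the previous RFD so the RU can run past it.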
119 * V. Miscellaneous
121 * VLAN offloading of tagging, stripping and filtering is not
122 * supported, but the driver will accommodate the extra 4-byte VLAN tag
123 * for processing by upper layers. Tx/Rx Checksum offloading is not
124 * supported. Tx Scatter/Gather is not supported. Jumbo Frames are
125 * not supported (hardware limitation).
127 * MagicPacket(tm) WoL support is enabled/disabled via ethtool.
129 * Thanks to JC (jchapman@katalix.com) for helping with
130 * testing/troubleshooting the development driver.
132 * TODO:
133 * o several entry points race with dev->close
134 * o check for tx-no-resources/stop Q races with tx clean/wake Q
136 * FIXES:
137 * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
138 * - Stratus87247: protect MDI control register manipulations
141 #include <linux/module.h>
142 #include <linux/moduleparam.h>
143 #include <linux/kernel.h>
144 #include <linux/types.h>
145 #include <linux/slab.h>
146 #include <linux/delay.h>
147 #include <linux/init.h>
148 #include <linux/pci.h>
149 #include <linux/dma-mapping.h>
150 #include <linux/netdevice.h>
151 #include <linux/etherdevice.h>
152 #include <linux/mii.h>
153 #include <linux/if_vlan.h>
154 #include <linux/skbuff.h>
155 #include <linux/ethtool.h>
156 #include <linux/string.h>
157 #include <asm/unaligned.h>
160 #define DRV_NAME "e100"
161 #define DRV_EXT "-NAPI"
162 #define DRV_VERSION "3.5.23-k4"DRV_EXT
163 #define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
164 #define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"
165 #define PFX DRV_NAME ": "
167 #define E100_WATCHDOG_PERIOD (2 * HZ)
168 #define E100_NAPI_WEIGHT 16
170 MODULE_DESCRIPTION(DRV_DESCRIPTION);
171 MODULE_AUTHOR(DRV_COPYRIGHT);
172 MODULE_LICENSE("GPL");
173 MODULE_VERSION(DRV_VERSION);
175 static int debug = 3;
176 static int eeprom_bad_csum_allow = 0;
177 static int use_io = 0;
178 module_param(debug, int, 0);
179 module_param(eeprom_bad_csum_allow, int, 0);
180 module_param(use_io, int, 0);
181 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
182 MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
183 MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
184 #define DPRINTK(nlevel, klevel, fmt, args...) \
185 (void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
186 printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
187 __FUNCTION__ , ## args))
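/* DPRINTK() is a conditional printk: the message is emitted only when the
* corresponding NETIF_MSG_<nlevel> bit is set in nic->msg_enable (cf. the
* 'debug' module parameter above), prefixed with the device and function
* names. */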
189 #define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
190 PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
191 PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
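/* The trailing 'ich' argument lands in the driver_data field of each
* pci_device_id entry; a non-zero value marks ICH chipset variants so that
* probe code can set the 'ich' flag in struct nic (see below) and apply the
* ICH-specific workarounds. */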
192 static struct pci_device_id e100_id_table[] = {
193 INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
194 INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
195 INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
196 INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
197 INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
198 INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
199 INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
200 INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
201 INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
202 INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
203 INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
204 INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
205 INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
206 INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
207 INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
208 INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
209 INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
210 INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
211 INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
212 INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
213 INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
214 INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
215 INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
216 INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
217 INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
218 INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
219 INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
220 INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
221 INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
222 INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
223 INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
224 INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
225 INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
226 INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
227 INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
228 INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
229 INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
230 INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
231 INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
232 INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
233 INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
234 { 0, }
236 MODULE_DEVICE_TABLE(pci, e100_id_table);
238 enum mac {
239 mac_82557_D100_A = 0,
240 mac_82557_D100_B = 1,
241 mac_82557_D100_C = 2,
242 mac_82558_D101_A4 = 4,
243 mac_82558_D101_B0 = 5,
244 mac_82559_D101M = 8,
245 mac_82559_D101S = 9,
246 mac_82550_D102 = 12,
247 mac_82550_D102_C = 13,
248 mac_82551_E = 14,
249 mac_82551_F = 15,
250 mac_82551_10 = 16,
251 mac_unknown = 0xFF,
254 enum phy {
255 phy_100a = 0x000003E0,
256 phy_100c = 0x035002A8,
257 phy_82555_tx = 0x015002A8,
258 phy_nsc_tx = 0x5C002000,
259 phy_82562_et = 0x033002A8,
260 phy_82562_em = 0x032002A8,
261 phy_82562_ek = 0x031002A8,
262 phy_82562_eh = 0x017002A8,
263 phy_unknown = 0xFFFFFFFF,
266 /* CSR (Control/Status Registers) */
267 struct csr {
268 struct {
269 u8 status;
270 u8 stat_ack;
271 u8 cmd_lo;
272 u8 cmd_hi;
273 u32 gen_ptr;
274 } scb;
275 u32 port;
276 u16 flash_ctrl;
277 u8 eeprom_ctrl_lo;
278 u8 eeprom_ctrl_hi;
279 u32 mdi_ctrl;
280 u32 rx_dma_count;
283 enum scb_status {
284 rus_ready = 0x10,
285 rus_mask = 0x3C,
288 enum ru_state {
289 RU_SUSPENDED = 0,
290 RU_RUNNING = 1,
291 RU_UNINITIALIZED = -1,
294 enum scb_stat_ack {
295 stat_ack_not_ours = 0x00,
296 stat_ack_sw_gen = 0x04,
297 stat_ack_rnr = 0x10,
298 stat_ack_cu_idle = 0x20,
299 stat_ack_frame_rx = 0x40,
300 stat_ack_cu_cmd_done = 0x80,
301 stat_ack_not_present = 0xFF,
302 stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
303 stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
306 enum scb_cmd_hi {
307 irq_mask_none = 0x00,
308 irq_mask_all = 0x01,
309 irq_sw_gen = 0x02,
312 enum scb_cmd_lo {
313 cuc_nop = 0x00,
314 ruc_start = 0x01,
315 ruc_load_base = 0x06,
316 cuc_start = 0x10,
317 cuc_resume = 0x20,
318 cuc_dump_addr = 0x40,
319 cuc_dump_stats = 0x50,
320 cuc_load_base = 0x60,
321 cuc_dump_reset = 0x70,
324 enum cuc_dump {
325 cuc_dump_complete = 0x0000A005,
326 cuc_dump_reset_complete = 0x0000A007,
329 enum port {
330 software_reset = 0x0000,
331 selftest = 0x0001,
332 selective_reset = 0x0002,
335 enum eeprom_ctrl_lo {
336 eesk = 0x01,
337 eecs = 0x02,
338 eedi = 0x04,
339 eedo = 0x08,
342 enum mdi_ctrl {
343 mdi_write = 0x04000000,
344 mdi_read = 0x08000000,
345 mdi_ready = 0x10000000,
348 enum eeprom_op {
349 op_write = 0x05,
350 op_read = 0x06,
351 op_ewds = 0x10,
352 op_ewen = 0x13,
355 enum eeprom_offsets {
356 eeprom_cnfg_mdix = 0x03,
357 eeprom_id = 0x0A,
358 eeprom_config_asf = 0x0D,
359 eeprom_smbus_addr = 0x90,
362 enum eeprom_cnfg_mdix {
363 eeprom_mdix_enabled = 0x0080,
366 enum eeprom_id {
367 eeprom_id_wol = 0x0020,
370 enum eeprom_config_asf {
371 eeprom_asf = 0x8000,
372 eeprom_gcl = 0x4000,
375 enum cb_status {
376 cb_complete = 0x8000,
377 cb_ok = 0x2000,
380 enum cb_command {
381 cb_nop = 0x0000,
382 cb_iaaddr = 0x0001,
383 cb_config = 0x0002,
384 cb_multi = 0x0003,
385 cb_tx = 0x0004,
386 cb_ucode = 0x0005,
387 cb_dump = 0x0006,
388 cb_tx_sf = 0x0008,
389 cb_cid = 0x1f00,
390 cb_i = 0x2000,
391 cb_s = 0x4000,
392 cb_el = 0x8000,
395 struct rfd {
396 u16 status;
397 u16 command;
398 u32 link;
399 u32 rbd;
400 u16 actual_size;
401 u16 size;
404 struct rx {
405 struct rx *next, *prev;
406 struct sk_buff *skb;
407 dma_addr_t dma_addr;
410 #if defined(__BIG_ENDIAN_BITFIELD)
411 #define X(a,b) b,a
412 #else
413 #define X(a,b) a,b
414 #endif
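/* Hardware configure-command layout. Bitfields are declared low-order bit
* first; on __BIG_ENDIAN_BITFIELD machines the X() macro above swaps each
* pair so the byte layout seen by the device is identical on both bitfield
* orderings. */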
415 struct config {
416 /*0*/ u8 X(byte_count:6, pad0:2);
417 /*1*/ u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
418 /*2*/ u8 adaptive_ifs;
419 /*3*/ u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
420 term_write_cache_line:1), pad3:4);
421 /*4*/ u8 X(rx_dma_max_count:7, pad4:1);
422 /*5*/ u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
423 /*6*/ u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
424 tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
425 rx_discard_overruns:1), rx_save_bad_frames:1);
426 /*7*/ u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
427 pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
428 tx_dynamic_tbd:1);
429 /*8*/ u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
430 /*9*/ u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
431 link_status_wake:1), arp_wake:1), mcmatch_wake:1);
432 /*10*/ u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
433 loopback:2);
434 /*11*/ u8 X(linear_priority:3, pad11:5);
435 /*12*/ u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
436 /*13*/ u8 ip_addr_lo;
437 /*14*/ u8 ip_addr_hi;
438 /*15*/ u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
439 wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
440 pad15_2:1), crs_or_cdt:1);
441 /*16*/ u8 fc_delay_lo;
442 /*17*/ u8 fc_delay_hi;
443 /*18*/ u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
444 rx_long_ok:1), fc_priority_threshold:3), pad18:1);
445 /*19*/ u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
446 fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
447 full_duplex_force:1), full_duplex_pin:1);
448 /*20*/ u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
449 /*21*/ u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
450 /*22*/ u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
451 u8 pad_d102[9];
454 #define E100_MAX_MULTICAST_ADDRS 64
455 struct multi {
456 u16 count;
457 u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
460 /* Important: keep total struct u32-aligned */
461 #define UCODE_SIZE 134
462 struct cb {
463 u16 status;
464 u16 command;
465 u32 link;
466 union {
467 u8 iaaddr[ETH_ALEN];
468 u32 ucode[UCODE_SIZE];
469 struct config config;
470 struct multi multi;
471 struct {
472 u32 tbd_array;
473 u16 tcb_byte_count;
474 u8 threshold;
475 u8 tbd_count;
476 struct {
477 u32 buf_addr;
478 u16 size;
479 u16 eol;
480 } tbd;
481 } tcb;
482 u32 dump_buffer_addr;
483 } u;
484 struct cb *next, *prev;
485 dma_addr_t dma_addr;
486 struct sk_buff *skb;
489 enum loopback {
490 lb_none = 0, lb_mac = 1, lb_phy = 3,
493 struct stats {
494 u32 tx_good_frames, tx_max_collisions, tx_late_collisions,
495 tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
496 tx_multiple_collisions, tx_total_collisions;
497 u32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
498 rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
499 rx_short_frame_errors;
500 u32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
501 u16 xmt_tco_frames, rcv_tco_frames;
502 u32 complete;
505 struct mem {
506 struct {
507 u32 signature;
508 u32 result;
509 } selftest;
510 struct stats stats;
511 u8 dump_buf[596];
514 struct param_range {
515 u32 min;
516 u32 max;
517 u32 count;
520 struct params {
521 struct param_range rfds;
522 struct param_range cbs;
525 struct nic {
526 /* Begin: frequently used values: keep adjacent for cache effect */
527 u32 msg_enable ____cacheline_aligned;
528 struct net_device *netdev;
529 struct pci_dev *pdev;
531 struct rx *rxs ____cacheline_aligned;
532 struct rx *rx_to_use;
533 struct rx *rx_to_clean;
534 struct rfd blank_rfd;
535 enum ru_state ru_running;
537 spinlock_t cb_lock ____cacheline_aligned;
538 spinlock_t cmd_lock;
539 struct csr __iomem *csr;
540 enum scb_cmd_lo cuc_cmd;
541 unsigned int cbs_avail;
542 struct cb *cbs;
543 struct cb *cb_to_use;
544 struct cb *cb_to_send;
545 struct cb *cb_to_clean;
546 u16 tx_command;
547 /* End: frequently used values: keep adjacent for cache effect */
549 enum {
550 ich = (1 << 0),
551 promiscuous = (1 << 1),
552 multicast_all = (1 << 2),
553 wol_magic = (1 << 3),
554 ich_10h_workaround = (1 << 4),
555 } flags ____cacheline_aligned;
557 enum mac mac;
558 enum phy phy;
559 struct params params;
560 struct net_device_stats net_stats;
561 struct timer_list watchdog;
562 struct timer_list blink_timer;
563 struct mii_if_info mii;
564 struct work_struct tx_timeout_task;
565 enum loopback loopback;
567 struct mem *mem;
568 dma_addr_t dma_addr;
570 dma_addr_t cbs_dma_addr;
571 u8 adaptive_ifs;
572 u8 tx_threshold;
573 u32 tx_frames;
574 u32 tx_collisions;
575 u32 tx_deferred;
576 u32 tx_single_collisions;
577 u32 tx_multiple_collisions;
578 u32 tx_fc_pause;
579 u32 tx_tco_frames;
581 u32 rx_fc_pause;
582 u32 rx_fc_unsupported;
583 u32 rx_tco_frames;
584 u32 rx_over_length_errors;
586 u16 leds;
587 u16 eeprom_wc;
588 u16 eeprom[256];
589 spinlock_t mdio_lock;
592 static inline void e100_write_flush(struct nic *nic)
594 /* Flush previous PCI writes through intermediate bridges
595 * by doing a benign read */
596 (void)ioread8(&nic->csr->scb.status);
599 static void e100_enable_irq(struct nic *nic)
601 unsigned long flags;
603 spin_lock_irqsave(&nic->cmd_lock, flags);
604 iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
605 e100_write_flush(nic);
606 spin_unlock_irqrestore(&nic->cmd_lock, flags);
609 static void e100_disable_irq(struct nic *nic)
611 unsigned long flags;
613 spin_lock_irqsave(&nic->cmd_lock, flags);
614 iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
615 e100_write_flush(nic);
616 spin_unlock_irqrestore(&nic->cmd_lock, flags);
619 static void e100_hw_reset(struct nic *nic)
621 /* Put CU and RU into idle with a selective reset to get
622 * device off of PCI bus */
623 iowrite32(selective_reset, &nic->csr->port);
624 e100_write_flush(nic); udelay(20);
626 /* Now fully reset device */
627 iowrite32(software_reset, &nic->csr->port);
628 e100_write_flush(nic); udelay(20);
630 /* Mask off our interrupt line - it's unmasked after reset */
631 e100_disable_irq(nic);
634 static int e100_self_test(struct nic *nic)
636 u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);
638 /* Passing the self-test is a pretty good indication
639 * that the device can DMA to/from host memory */
641 nic->mem->selftest.signature = 0;
642 nic->mem->selftest.result = 0xFFFFFFFF;
644 iowrite32(selftest | dma_addr, &nic->csr->port);
645 e100_write_flush(nic);
646 /* Wait 10 msec for self-test to complete */
647 msleep(10);
649 /* Interrupts are enabled after self-test */
650 e100_disable_irq(nic);
652 /* Check results of self-test */
653 if(nic->mem->selftest.result != 0) {
654 DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n",
655 nic->mem->selftest.result);
656 return -ETIMEDOUT;
658 if(nic->mem->selftest.signature == 0) {
659 DPRINTK(HW, ERR, "Self-test failed: timed out\n");
660 return -ETIMEDOUT;
663 return 0;
666 static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, u16 data)
668 u32 cmd_addr_data[3];
669 u8 ctrl;
670 int i, j;
672 /* Three cmds: write/erase enable, write data, write/erase disable */
673 cmd_addr_data[0] = op_ewen << (addr_len - 2);
674 cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
675 cpu_to_le16(data);
676 cmd_addr_data[2] = op_ewds << (addr_len - 2);
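/* cmd_addr_data[1] packs the write opcode and address into its upper 16
* bits and the data word into the lower 16; all 32 bits of each word are
* then clocked out MSB-first on EEDI in the loop below. */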
678 /* Bit-bang cmds to write word to eeprom */
679 for(j = 0; j < 3; j++) {
681 /* Chip select */
682 iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
683 e100_write_flush(nic); udelay(4);
685 for(i = 31; i >= 0; i--) {
686 ctrl = (cmd_addr_data[j] & (1 << i)) ?
687 eecs | eedi : eecs;
688 iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
689 e100_write_flush(nic); udelay(4);
691 iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
692 e100_write_flush(nic); udelay(4);
694 /* Wait 10 msec for cmd to complete */
695 msleep(10);
697 /* Chip deselect */
698 iowrite8(0, &nic->csr->eeprom_ctrl_lo);
699 e100_write_flush(nic); udelay(4);
703 /* General technique stolen from the eepro100 driver - very clever */
704 static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
706 u32 cmd_addr_data;
707 u16 data = 0;
708 u8 ctrl;
709 int i;
711 cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
713 /* Chip select */
714 iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
715 e100_write_flush(nic); udelay(4);
717 /* Bit-bang to read word from eeprom */
718 for(i = 31; i >= 0; i--) {
719 ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
720 iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
721 e100_write_flush(nic); udelay(4);
723 iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
724 e100_write_flush(nic); udelay(4);
726 /* Eeprom drives a dummy zero to EEDO after receiving
727 * complete address. Use this to adjust addr_len. */
728 ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
729 if(!(ctrl & eedo) && i > 16) {
730 *addr_len -= (i - 16);
731 i = 17;
734 data = (data << 1) | (ctrl & eedo ? 1 : 0);
737 /* Chip deselect */
738 iowrite8(0, &nic->csr->eeprom_ctrl_lo);
739 e100_write_flush(nic); udelay(4);
741 return le16_to_cpu(data);
744 /* Load entire EEPROM image into driver cache and validate checksum */
745 static int e100_eeprom_load(struct nic *nic)
747 u16 addr, addr_len = 8, checksum = 0;
749 /* Try reading with an 8-bit addr len to discover actual addr len */
750 e100_eeprom_read(nic, &addr_len, 0);
751 nic->eeprom_wc = 1 << addr_len;
753 for(addr = 0; addr < nic->eeprom_wc; addr++) {
754 nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
755 if(addr < nic->eeprom_wc - 1)
756 checksum += cpu_to_le16(nic->eeprom[addr]);
759 /* The checksum, stored in the last word, is calculated such that
760 * the sum of words should be 0xBABA */
761 checksum = le16_to_cpu(0xBABA - checksum);
762 if(checksum != nic->eeprom[nic->eeprom_wc - 1]) {
763 DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
764 if (!eeprom_bad_csum_allow)
765 return -EAGAIN;
768 return 0;
771 /* Save (portion of) driver EEPROM cache to device and update checksum */
772 static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
774 u16 addr, addr_len = 8, checksum = 0;
776 /* Try reading with an 8-bit addr len to discover actual addr len */
777 e100_eeprom_read(nic, &addr_len, 0);
778 nic->eeprom_wc = 1 << addr_len;
780 if(start + count >= nic->eeprom_wc)
781 return -EINVAL;
783 for(addr = start; addr < start + count; addr++)
784 e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);
786 /* The checksum, stored in the last word, is calculated such that
787 * the sum of words should be 0xBABA */
788 for(addr = 0; addr < nic->eeprom_wc - 1; addr++)
789 checksum += cpu_to_le16(nic->eeprom[addr]);
790 nic->eeprom[nic->eeprom_wc - 1] = le16_to_cpu(0xBABA - checksum);
791 e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
792 nic->eeprom[nic->eeprom_wc - 1]);
794 return 0;
797 #define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
798 #define E100_WAIT_SCB_FAST 20 /* delay like the old code */
799 static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
801 unsigned long flags;
802 unsigned int i;
803 int err = 0;
805 spin_lock_irqsave(&nic->cmd_lock, flags);
807 /* Previous command is accepted when SCB clears */
808 for(i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
809 if(likely(!ioread8(&nic->csr->scb.cmd_lo)))
810 break;
811 cpu_relax();
812 if(unlikely(i > E100_WAIT_SCB_FAST))
813 udelay(5);
815 if(unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
816 err = -EAGAIN;
817 goto err_unlock;
820 if(unlikely(cmd != cuc_resume))
821 iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
822 iowrite8(cmd, &nic->csr->scb.cmd_lo);
824 err_unlock:
825 spin_unlock_irqrestore(&nic->cmd_lock, flags);
827 return err;
830 static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
831 void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
833 struct cb *cb;
834 unsigned long flags;
835 int err = 0;
837 spin_lock_irqsave(&nic->cb_lock, flags);
839 if(unlikely(!nic->cbs_avail)) {
840 err = -ENOMEM;
841 goto err_unlock;
844 cb = nic->cb_to_use;
845 nic->cb_to_use = cb->next;
846 nic->cbs_avail--;
847 cb->skb = skb;
849 if(unlikely(!nic->cbs_avail))
850 err = -ENOSPC;
852 cb_prepare(nic, cb, skb);
854 /* Order is important otherwise we'll be in a race with h/w:
855 * set S-bit in current first, then clear S-bit in previous. */
856 cb->command |= cpu_to_le16(cb_s);
857 wmb();
858 cb->prev->command &= cpu_to_le16(~cb_s);
860 while(nic->cb_to_send != nic->cb_to_use) {
861 if(unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
862 nic->cb_to_send->dma_addr))) {
863 /* Ok, here's where things get sticky. It's
864 * possible that we can't schedule the command
865 * because the controller is too busy, so
866 * let's just queue the command and try again
867 * when another command is scheduled. */
868 if(err == -ENOSPC) {
869 /* request a reset */
870 schedule_work(&nic->tx_timeout_task);
872 break;
873 } else {
874 nic->cuc_cmd = cuc_resume;
875 nic->cb_to_send = nic->cb_to_send->next;
879 err_unlock:
880 spin_unlock_irqrestore(&nic->cb_lock, flags);
882 return err;
885 static u16 mdio_ctrl(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
887 u32 data_out = 0;
888 unsigned int i;
889 unsigned long flags;
893 * Stratus87247: we shouldn't be writing the MDI control
894 * register until the Ready bit shows True. Also, since
895 * manipulation of the MDI control registers is a multi-step
896 * procedure it should be done under lock.
898 spin_lock_irqsave(&nic->mdio_lock, flags);
899 for (i = 100; i; --i) {
900 if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
901 break;
902 udelay(20);
904 if (unlikely(!i)) {
905 printk("e100.mdio_ctrl(%s) won't go Ready\n",
906 nic->netdev->name );
907 spin_unlock_irqrestore(&nic->mdio_lock, flags);
908 return 0; /* No way to indicate timeout error */
910 iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);
912 for (i = 0; i < 100; i++) {
913 udelay(20);
914 if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
915 break;
917 spin_unlock_irqrestore(&nic->mdio_lock, flags);
918 DPRINTK(HW, DEBUG,
919 "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
920 dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
921 return (u16)data_out;
924 static int mdio_read(struct net_device *netdev, int addr, int reg)
926 return mdio_ctrl(netdev_priv(netdev), addr, mdi_read, reg, 0);
929 static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
931 mdio_ctrl(netdev_priv(netdev), addr, mdi_write, reg, data);
934 static void e100_get_defaults(struct nic *nic)
936 struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
937 struct param_range cbs = { .min = 64, .max = 256, .count = 128 };
939 /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
940 nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
941 if(nic->mac == mac_unknown)
942 nic->mac = mac_82557_D100_A;
944 nic->params.rfds = rfds;
945 nic->params.cbs = cbs;
947 /* Quadwords to DMA into FIFO before starting frame transmit */
948 nic->tx_threshold = 0xE0;
950 /* no interrupt for every tx completion, delay = 256us if not 557*/
951 nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
952 ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
954 /* Template for a freshly allocated RFD */
955 nic->blank_rfd.command = cpu_to_le16(cb_el);
956 nic->blank_rfd.rbd = 0xFFFFFFFF;
957 nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
959 /* MII setup */
960 nic->mii.phy_id_mask = 0x1F;
961 nic->mii.reg_num_mask = 0x1F;
962 nic->mii.dev = nic->netdev;
963 nic->mii.mdio_read = mdio_read;
964 nic->mii.mdio_write = mdio_write;
967 static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
969 struct config *config = &cb->u.config;
970 u8 *c = (u8 *)config;
972 cb->command = cpu_to_le16(cb_config);
974 memset(config, 0, sizeof(struct config));
976 config->byte_count = 0x16; /* bytes in this struct */
977 config->rx_fifo_limit = 0x8; /* bytes in FIFO before DMA */
978 config->direct_rx_dma = 0x1; /* reserved */
979 config->standard_tcb = 0x1; /* 1=standard, 0=extended */
980 config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */
981 config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */
982 config->tx_underrun_retry = 0x3; /* # of underrun retries */
983 config->mii_mode = 0x1; /* 1=MII mode, 0=503 mode */
984 config->pad10 = 0x6;
985 config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */
986 config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */
987 config->ifs = 0x6; /* x16 = inter frame spacing */
988 config->ip_addr_hi = 0xF2; /* ARP IP filter - not used */
989 config->pad15_1 = 0x1;
990 config->pad15_2 = 0x1;
991 config->crs_or_cdt = 0x0; /* 0=CRS only, 1=CRS or CDT */
992 config->fc_delay_hi = 0x40; /* time delay for fc frame */
993 config->tx_padding = 0x1; /* 1=pad short frames */
994 config->fc_priority_threshold = 0x7; /* 7=priority fc disabled */
995 config->pad18 = 0x1;
996 config->full_duplex_pin = 0x1; /* 1=examine FDX# pin */
997 config->pad20_1 = 0x1F;
998 config->fc_priority_location = 0x1; /* 1=byte#31, 0=byte#19 */
999 config->pad21_1 = 0x5;
1001 config->adaptive_ifs = nic->adaptive_ifs;
1002 config->loopback = nic->loopback;
1004 if(nic->mii.force_media && nic->mii.full_duplex)
1005 config->full_duplex_force = 0x1; /* 1=force, 0=auto */
1007 if(nic->flags & promiscuous || nic->loopback) {
1008 config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */
1009 config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */
1010 config->promiscuous_mode = 0x1; /* 1=on, 0=off */
1013 if(nic->flags & multicast_all)
1014 config->multicast_all = 0x1; /* 1=accept, 0=no */
1016 /* disable WoL when up */
1017 if(netif_running(nic->netdev) || !(nic->flags & wol_magic))
1018 config->magic_packet_disable = 0x1; /* 1=off, 0=on */
1020 if(nic->mac >= mac_82558_D101_A4) {
1021 config->fc_disable = 0x1; /* 1=Tx fc off, 0=Tx fc on */
1022 config->mwi_enable = 0x1; /* 1=enable, 0=disable */
1023 config->standard_tcb = 0x0; /* 1=standard, 0=extended */
1024 config->rx_long_ok = 0x1; /* 1=VLANs ok, 0=standard */
1025 if (nic->mac >= mac_82559_D101M) {
1026 config->tno_intr = 0x1; /* TCO stats enable */
1027 /* Enable TCO in extended config */
1028 if (nic->mac >= mac_82551_10) {
1029 config->byte_count = 0x20; /* extended bytes */
1030 config->rx_d102_mode = 0x1; /* GMRC for TCO */
1032 } else {
1033 config->standard_stat_counter = 0x0;
1037 DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
1038 c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
1039 DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
1040 c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
1041 DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
1042 c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
1045 /********************************************************/
1046 /* Micro code for 8086:1229 Rev 8 */
1047 /********************************************************/
1049 /* Parameter values for the D101M B-step */
1050 #define D101M_CPUSAVER_TIMER_DWORD 78
1051 #define D101M_CPUSAVER_BUNDLE_DWORD 65
1052 #define D101M_CPUSAVER_MIN_SIZE_DWORD 126
1054 #define D101M_B_RCVBUNDLE_UCODE \
1056 0x00550215, 0xFFFF0437, 0xFFFFFFFF, 0x06A70789, 0xFFFFFFFF, 0x0558FFFF, \
1057 0x000C0001, 0x00101312, 0x000C0008, 0x00380216, \
1058 0x0010009C, 0x00204056, 0x002380CC, 0x00380056, \
1059 0x0010009C, 0x00244C0B, 0x00000800, 0x00124818, \
1060 0x00380438, 0x00000000, 0x00140000, 0x00380555, \
1061 0x00308000, 0x00100662, 0x00100561, 0x000E0408, \
1062 0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
1063 0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
1064 0x000C007E, 0x00222C21, 0x000C0002, 0x00103093, \
1065 0x00380C7A, 0x00080000, 0x00103090, 0x00380C7A, \
1066 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1067 0x0010009C, 0x00244C2D, 0x00010004, 0x00041000, \
1068 0x003A0437, 0x00044010, 0x0038078A, 0x00000000, \
1069 0x00100099, 0x00206C7A, 0x0010009C, 0x00244C48, \
1070 0x00130824, 0x000C0001, 0x00101213, 0x00260C75, \
1071 0x00041000, 0x00010004, 0x00130826, 0x000C0006, \
1072 0x002206A8, 0x0013C926, 0x00101313, 0x003806A8, \
1073 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1074 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1075 0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
1076 0x00101210, 0x00380C34, 0x00000000, 0x00000000, \
1077 0x0021155B, 0x00100099, 0x00206559, 0x0010009C, \
1078 0x00244559, 0x00130836, 0x000C0000, 0x00220C62, \
1079 0x000C0001, 0x00101B13, 0x00229C0E, 0x00210C0E, \
1080 0x00226C0E, 0x00216C0E, 0x0022FC0E, 0x00215C0E, \
1081 0x00214C0E, 0x00380555, 0x00010004, 0x00041000, \
1082 0x00278C67, 0x00040800, 0x00018100, 0x003A0437, \
1083 0x00130826, 0x000C0001, 0x00220559, 0x00101313, \
1084 0x00380559, 0x00000000, 0x00000000, 0x00000000, \
1085 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1086 0x00000000, 0x00130831, 0x0010090B, 0x00124813, \
1087 0x000CFF80, 0x002606AB, 0x00041000, 0x00010004, \
1088 0x003806A8, 0x00000000, 0x00000000, 0x00000000, \
1091 /********************************************************/
1092 /* Micro code for 8086:1229 Rev 9 */
1093 /********************************************************/
1095 /* Parameter values for the D101S */
1096 #define D101S_CPUSAVER_TIMER_DWORD 78
1097 #define D101S_CPUSAVER_BUNDLE_DWORD 67
1098 #define D101S_CPUSAVER_MIN_SIZE_DWORD 128
1100 #define D101S_RCVBUNDLE_UCODE \
1102 0x00550242, 0xFFFF047E, 0xFFFFFFFF, 0x06FF0818, 0xFFFFFFFF, 0x05A6FFFF, \
1103 0x000C0001, 0x00101312, 0x000C0008, 0x00380243, \
1104 0x0010009C, 0x00204056, 0x002380D0, 0x00380056, \
1105 0x0010009C, 0x00244F8B, 0x00000800, 0x00124818, \
1106 0x0038047F, 0x00000000, 0x00140000, 0x003805A3, \
1107 0x00308000, 0x00100610, 0x00100561, 0x000E0408, \
1108 0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
1109 0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
1110 0x000C007E, 0x00222FA1, 0x000C0002, 0x00103093, \
1111 0x00380F90, 0x00080000, 0x00103090, 0x00380F90, \
1112 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1113 0x0010009C, 0x00244FAD, 0x00010004, 0x00041000, \
1114 0x003A047E, 0x00044010, 0x00380819, 0x00000000, \
1115 0x00100099, 0x00206FFD, 0x0010009A, 0x0020AFFD, \
1116 0x0010009C, 0x00244FC8, 0x00130824, 0x000C0001, \
1117 0x00101213, 0x00260FF7, 0x00041000, 0x00010004, \
1118 0x00130826, 0x000C0006, 0x00220700, 0x0013C926, \
1119 0x00101313, 0x00380700, 0x00000000, 0x00000000, \
1120 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1121 0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
1122 0x00101210, 0x00380FB6, 0x00000000, 0x00000000, \
1123 0x002115A9, 0x00100099, 0x002065A7, 0x0010009A, \
1124 0x0020A5A7, 0x0010009C, 0x002445A7, 0x00130836, \
1125 0x000C0000, 0x00220FE4, 0x000C0001, 0x00101B13, \
1126 0x00229F8E, 0x00210F8E, 0x00226F8E, 0x00216F8E, \
1127 0x0022FF8E, 0x00215F8E, 0x00214F8E, 0x003805A3, \
1128 0x00010004, 0x00041000, 0x00278FE9, 0x00040800, \
1129 0x00018100, 0x003A047E, 0x00130826, 0x000C0001, \
1130 0x002205A7, 0x00101313, 0x003805A7, 0x00000000, \
1131 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1132 0x00000000, 0x00000000, 0x00000000, 0x00130831, \
1133 0x0010090B, 0x00124813, 0x000CFF80, 0x00260703, \
1134 0x00041000, 0x00010004, 0x00380700 \
1137 /********************************************************/
1138 /* Micro code for the 8086:1229 Rev F/10 */
1139 /********************************************************/
1141 /* Parameter values for the D102 E-step */
1142 #define D102_E_CPUSAVER_TIMER_DWORD 42
1143 #define D102_E_CPUSAVER_BUNDLE_DWORD 54
1144 #define D102_E_CPUSAVER_MIN_SIZE_DWORD 46
1146 #define D102_E_RCVBUNDLE_UCODE \
1148 0x007D028F, 0x0E4204F9, 0x14ED0C85, 0x14FA14E9, 0x0EF70E36, 0x1FFF1FFF, \
1149 0x00E014B9, 0x00000000, 0x00000000, 0x00000000, \
1150 0x00E014BD, 0x00000000, 0x00000000, 0x00000000, \
1151 0x00E014D5, 0x00000000, 0x00000000, 0x00000000, \
1152 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1153 0x00E014C1, 0x00000000, 0x00000000, 0x00000000, \
1154 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1155 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1156 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1157 0x00E014C8, 0x00000000, 0x00000000, 0x00000000, \
1158 0x00200600, 0x00E014EE, 0x00000000, 0x00000000, \
1159 0x0030FF80, 0x00940E46, 0x00038200, 0x00102000, \
1160 0x00E00E43, 0x00000000, 0x00000000, 0x00000000, \
1161 0x00300006, 0x00E014FB, 0x00000000, 0x00000000, \
1162 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1163 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1164 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1165 0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000, \
1166 0x00906EFD, 0x00900EFD, 0x00E00EF8, 0x00000000, \
1167 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1168 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1169 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1170 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1171 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1172 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1173 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1174 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1175 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1176 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1177 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1178 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1179 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1180 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1183 static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1185 /* *INDENT-OFF* */
1186 static struct {
1187 u32 ucode[UCODE_SIZE + 1];
1188 u8 mac;
1189 u8 timer_dword;
1190 u8 bundle_dword;
1191 u8 min_size_dword;
1192 } ucode_opts[] = {
1193 { D101M_B_RCVBUNDLE_UCODE,
1194 mac_82559_D101M,
1195 D101M_CPUSAVER_TIMER_DWORD,
1196 D101M_CPUSAVER_BUNDLE_DWORD,
1197 D101M_CPUSAVER_MIN_SIZE_DWORD },
1198 { D101S_RCVBUNDLE_UCODE,
1199 mac_82559_D101S,
1200 D101S_CPUSAVER_TIMER_DWORD,
1201 D101S_CPUSAVER_BUNDLE_DWORD,
1202 D101S_CPUSAVER_MIN_SIZE_DWORD },
1203 { D102_E_RCVBUNDLE_UCODE,
1204 mac_82551_F,
1205 D102_E_CPUSAVER_TIMER_DWORD,
1206 D102_E_CPUSAVER_BUNDLE_DWORD,
1207 D102_E_CPUSAVER_MIN_SIZE_DWORD },
1208 { D102_E_RCVBUNDLE_UCODE,
1209 mac_82551_10,
1210 D102_E_CPUSAVER_TIMER_DWORD,
1211 D102_E_CPUSAVER_BUNDLE_DWORD,
1212 D102_E_CPUSAVER_MIN_SIZE_DWORD },
1213 { {0}, 0, 0, 0, 0}
1214 }, *opts;
1215 /* *INDENT-ON* */
1217 /*************************************************************************
1218 * CPUSaver parameters
1220 * All CPUSaver parameters are 16-bit literals that are part of a
1221 * "move immediate value" instruction. By changing the value of
1222 * the literal in the instruction before the code is loaded, the
1223 * driver can change the algorithm.
1225 * INTDELAY - This loads the dead-man timer with its initial value.
1226 * When this timer expires the interrupt is asserted, and the
1227 * timer is reset each time a new packet is received. (see
1228 * BUNDLEMAX below to set the limit on number of chained packets)
1229 * The current default is 0x600 or 1536. Experiments show that
1230 * the value should probably stay within the 0x200 - 0x1000 range.
1232 * BUNDLEMAX -
1233 * This sets the maximum number of frames that will be bundled. In
1234 * some situations, such as the TCP windowing algorithm, it may be
1235 * better to limit the growth of the bundle size than let it go as
1236 * high as it can, because that could cause too much added latency.
1237 * The default is six, because this is the number of packets in the
1238 * default TCP window size. A value of 1 would make CPUSaver indicate
1239 * an interrupt for every frame received. If you do not want to put
1240 * a limit on the bundle size, set this value to 0xFFFF.
1242 * BUNDLESMALL -
1243 * This contains a bit-mask describing the minimum size frame that
1244 * will be bundled. The default masks the lower 7 bits, which means
1245 * that any frame less than 128 bytes in length will not be bundled,
1246 * but will instead immediately generate an interrupt. This does
1247 * not affect the current bundle in any way. Any frame that is 128
1248 * bytes or larger will be bundled normally. This feature is meant
1249 * to provide immediate indication of ACK frames in a TCP environment.
1250 * Customers were seeing poor performance when a machine with CPUSaver
1251 * enabled was sending but not receiving. The delay introduced when
1252 * the ACKs were received was enough to reduce total throughput, because
1253 * the sender would sit idle until the ACK was finally seen.
1255 * The current default is 0xFF80, which masks out the lower 7 bits.
1256 * This means that any frame which is 0x7F (127) bytes or smaller
1257 * will cause an immediate interrupt. Because this value must be a
1258 * bit mask, there are only a few valid values that can be used. To
1259 * turn this feature off, the driver can write the value 0xFFFF to the
1260 * lower word of this instruction (in the same way that the other
1261 * parameters are used). Likewise, a value of 0xF800 (2047) would
1262 * cause an interrupt to be generated for every frame, because all
1263 * standard Ethernet frames are <= 2047 bytes in length.
1264 *************************************************************************/
1266 /* if you wish to disable the ucode functionality, while maintaining the
1267 * workarounds it provides, set the following defines to:
1268 * BUNDLESMALL 0
1269 * BUNDLEMAX 1
1270 * INTDELAY 1
1272 #define BUNDLESMALL 1
1273 #define BUNDLEMAX (u16)6
1274 #define INTDELAY (u16)1536 /* 0x600 */
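/* The three literals above are patched into the low 16 bits of the ucode
* dwords indexed by the *_TIMER_DWORD, *_BUNDLE_DWORD and *_MIN_SIZE_DWORD
* values, before the image is copied into the CB. For example, with the
* defaults here the D101M image gets 0x600 at dword 78, 6 at dword 65 and
* 0xFFFF (BUNDLESMALL set) at dword 126. */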
1276 /* do not load u-code for ICH devices */
1277 if (nic->flags & ich)
1278 goto noloaducode;
1280 /* Search for ucode match against h/w revision */
1281 for (opts = ucode_opts; opts->mac; opts++) {
1282 int i;
1283 u32 *ucode = opts->ucode;
1284 if (nic->mac != opts->mac)
1285 continue;
1287 /* Insert user-tunable settings */
1288 ucode[opts->timer_dword] &= 0xFFFF0000;
1289 ucode[opts->timer_dword] |= INTDELAY;
1290 ucode[opts->bundle_dword] &= 0xFFFF0000;
1291 ucode[opts->bundle_dword] |= BUNDLEMAX;
1292 ucode[opts->min_size_dword] &= 0xFFFF0000;
1293 ucode[opts->min_size_dword] |= (BUNDLESMALL) ? 0xFFFF : 0xFF80;
1295 for (i = 0; i < UCODE_SIZE; i++)
1296 cb->u.ucode[i] = cpu_to_le32(ucode[i]);
1297 cb->command = cpu_to_le16(cb_ucode | cb_el);
1298 return;
1301 noloaducode:
1302 cb->command = cpu_to_le16(cb_nop | cb_el);
1305 static inline int e100_exec_cb_wait(struct nic *nic, struct sk_buff *skb,
1306 void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
1308 int err = 0, counter = 50;
1309 struct cb *cb = nic->cb_to_clean;
1311 if ((err = e100_exec_cb(nic, NULL, e100_setup_ucode)))
1312 DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err);
1314 /* must restart cuc */
1315 nic->cuc_cmd = cuc_start;
1317 /* wait for completion */
1318 e100_write_flush(nic);
1319 udelay(10);
1321 /* wait for possibly (ouch) 500ms */
1322 while (!(cb->status & cpu_to_le16(cb_complete))) {
1323 msleep(10);
1324 if (!--counter) break;
1327 /* ack any interrupts, something could have been set */
1328 iowrite8(~0, &nic->csr->scb.stat_ack);
1330 /* if the command failed, or is not OK, notify and return */
1331 if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
1332 DPRINTK(PROBE,ERR, "ucode load failed\n");
1333 err = -EPERM;
1336 return err;
1339 static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
1340 struct sk_buff *skb)
1342 cb->command = cpu_to_le16(cb_iaaddr);
1343 memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
1346 static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1348 cb->command = cpu_to_le16(cb_dump);
1349 cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
1350 offsetof(struct mem, dump_buf));
1353 #define NCONFIG_AUTO_SWITCH 0x0080
1354 #define MII_NSC_CONG MII_RESV1
1355 #define NSC_CONG_ENABLE 0x0100
1356 #define NSC_CONG_TXREADY 0x0400
1357 #define ADVERTISE_FC_SUPPORTED 0x0400
1358 static int e100_phy_init(struct nic *nic)
1360 struct net_device *netdev = nic->netdev;
1361 u32 addr;
1362 u16 bmcr, stat, id_lo, id_hi, cong;
1364 /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
1365 for(addr = 0; addr < 32; addr++) {
1366 nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
1367 bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
1368 stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
1369 stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
1370 if(!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
1371 break;
1373 DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
1374 if(addr == 32)
1375 return -EAGAIN;
1377 /* Select the phy and isolate the rest */
1378 for(addr = 0; addr < 32; addr++) {
1379 if(addr != nic->mii.phy_id) {
1380 mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
1381 } else {
1382 bmcr = mdio_read(netdev, addr, MII_BMCR);
1383 mdio_write(netdev, addr, MII_BMCR,
1384 bmcr & ~BMCR_ISOLATE);
1388 /* Get phy ID */
1389 id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
1390 id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
1391 nic->phy = (u32)id_hi << 16 | (u32)id_lo;
1392 DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);
1394 /* Handle National tx phys */
1395 #define NCS_PHY_MODEL_MASK 0xFFF0FFFF
1396 if((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
1397 /* Disable congestion control */
1398 cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
1399 cong |= NSC_CONG_TXREADY;
1400 cong &= ~NSC_CONG_ENABLE;
1401 mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
1404 if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
1405 (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
1406 !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
1407 /* enable/disable MDI/MDI-X auto-switching. */
1408 mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
1409 nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
1412 return 0;
1415 static int e100_hw_init(struct nic *nic)
1417 int err;
1419 e100_hw_reset(nic);
1421 DPRINTK(HW, ERR, "e100_hw_init\n");
1422 if(!in_interrupt() && (err = e100_self_test(nic)))
1423 return err;
1425 if((err = e100_phy_init(nic)))
1426 return err;
1427 if((err = e100_exec_cmd(nic, cuc_load_base, 0)))
1428 return err;
1429 if((err = e100_exec_cmd(nic, ruc_load_base, 0)))
1430 return err;
1431 if ((err = e100_exec_cb_wait(nic, NULL, e100_setup_ucode)))
1432 return err;
1433 if((err = e100_exec_cb(nic, NULL, e100_configure)))
1434 return err;
1435 if((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
1436 return err;
1437 if((err = e100_exec_cmd(nic, cuc_dump_addr,
1438 nic->dma_addr + offsetof(struct mem, stats))))
1439 return err;
1440 if((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
1441 return err;
1443 e100_disable_irq(nic);
1445 return 0;
1448 static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1450 struct net_device *netdev = nic->netdev;
1451 struct dev_mc_list *list = netdev->mc_list;
1452 u16 i, count = min(netdev->mc_count, E100_MAX_MULTICAST_ADDRS);
1454 cb->command = cpu_to_le16(cb_multi);
1455 cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
1456 for(i = 0; list && i < count; i++, list = list->next)
1457 memcpy(&cb->u.multi.addr[i*ETH_ALEN], &list->dmi_addr,
1458 ETH_ALEN);
1461 static void e100_set_multicast_list(struct net_device *netdev)
1463 struct nic *nic = netdev_priv(netdev);
1465 DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
1466 netdev->mc_count, netdev->flags);
1468 if(netdev->flags & IFF_PROMISC)
1469 nic->flags |= promiscuous;
1470 else
1471 nic->flags &= ~promiscuous;
1473 if(netdev->flags & IFF_ALLMULTI ||
1474 netdev->mc_count > E100_MAX_MULTICAST_ADDRS)
1475 nic->flags |= multicast_all;
1476 else
1477 nic->flags &= ~multicast_all;
1479 e100_exec_cb(nic, NULL, e100_configure);
1480 e100_exec_cb(nic, NULL, e100_multi);
1483 static void e100_update_stats(struct nic *nic)
1485 struct net_device_stats *ns = &nic->net_stats;
1486 struct stats *s = &nic->mem->stats;
1487 u32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
1488 (nic->mac < mac_82559_D101M) ? (u32 *)&s->xmt_tco_frames :
1489 &s->complete;
1491 /* Device's stats reporting may take several microseconds to
1492 * complete, so we're always waiting for results of the
1493 * previous command. */
1495 if(*complete == le32_to_cpu(cuc_dump_reset_complete)) {
1496 *complete = 0;
1497 nic->tx_frames = le32_to_cpu(s->tx_good_frames);
1498 nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
1499 ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
1500 ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
1501 ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
1502 ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
1503 ns->collisions += nic->tx_collisions;
1504 ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
1505 le32_to_cpu(s->tx_lost_crs);
1506 ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
1507 nic->rx_over_length_errors;
1508 ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
1509 ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
1510 ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
1511 ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
1512 ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
1513 ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
1514 le32_to_cpu(s->rx_alignment_errors) +
1515 le32_to_cpu(s->rx_short_frame_errors) +
1516 le32_to_cpu(s->rx_cdt_errors);
1517 nic->tx_deferred += le32_to_cpu(s->tx_deferred);
1518 nic->tx_single_collisions +=
1519 le32_to_cpu(s->tx_single_collisions);
1520 nic->tx_multiple_collisions +=
1521 le32_to_cpu(s->tx_multiple_collisions);
1522 if(nic->mac >= mac_82558_D101_A4) {
1523 nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
1524 nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
1525 nic->rx_fc_unsupported +=
1526 le32_to_cpu(s->fc_rcv_unsupported);
1527 if(nic->mac >= mac_82559_D101M) {
1528 nic->tx_tco_frames +=
1529 le16_to_cpu(s->xmt_tco_frames);
1530 nic->rx_tco_frames +=
1531 le16_to_cpu(s->rcv_tco_frames);
1537 if(e100_exec_cmd(nic, cuc_dump_reset, 0))
1538 DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
1541 static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
1543 /* Adjust inter-frame-spacing (IFS) between two transmits if
1544 * we're getting collisions on a half-duplex connection. */
1546 if(duplex == DUPLEX_HALF) {
1547 u32 prev = nic->adaptive_ifs;
1548 u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
1550 if((nic->tx_frames / 32 < nic->tx_collisions) &&
1551 (nic->tx_frames > min_frames)) {
1552 if(nic->adaptive_ifs < 60)
1553 nic->adaptive_ifs += 5;
1554 } else if (nic->tx_frames < min_frames) {
1555 if(nic->adaptive_ifs >= 5)
1556 nic->adaptive_ifs -= 5;
1558 if(nic->adaptive_ifs != prev)
1559 e100_exec_cb(nic, NULL, e100_configure);
1563 static void e100_watchdog(unsigned long data)
1565 struct nic *nic = (struct nic *)data;
1566 struct ethtool_cmd cmd;
1568 DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);
1570 /* mii library handles link maintenance tasks */
1572 mii_ethtool_gset(&nic->mii, &cmd);
1574 if(mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
1575 DPRINTK(LINK, INFO, "link up, %sMbps, %s-duplex\n",
1576 cmd.speed == SPEED_100 ? "100" : "10",
1577 cmd.duplex == DUPLEX_FULL ? "full" : "half");
1578 } else if(!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
1579 DPRINTK(LINK, INFO, "link down\n");
1582 mii_check_link(&nic->mii);
1584 /* Software generated interrupt to recover from (rare) Rx
1585 * allocation failure.
1586 * Unfortunately have to use a spinlock to not re-enable interrupts
1587 * accidentally, due to hardware that shares a register between the
1588 * interrupt mask bit and the SW Interrupt generation bit */
1589 spin_lock_irq(&nic->cmd_lock);
1590 iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
1591 e100_write_flush(nic);
1592 spin_unlock_irq(&nic->cmd_lock);
1594 e100_update_stats(nic);
1595 e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex);
1597 if(nic->mac <= mac_82557_D100_C)
1598 /* Issue a multicast command to work around a 557 lock up */
1599 e100_set_multicast_list(nic->netdev);
1601 if(nic->flags & ich && cmd.speed==SPEED_10 && cmd.duplex==DUPLEX_HALF)
1602 /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
1603 nic->flags |= ich_10h_workaround;
1604 else
1605 nic->flags &= ~ich_10h_workaround;
1607 mod_timer(&nic->watchdog, jiffies + E100_WATCHDOG_PERIOD);
1610 static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
1611 struct sk_buff *skb)
1613 cb->command = nic->tx_command;
1614 /* interrupt every 16 packets regardless of delay */
1615 if((nic->cbs_avail & ~15) == nic->cbs_avail)
1616 cb->command |= cpu_to_le16(cb_i);
1617 cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
1618 cb->u.tcb.tcb_byte_count = 0;
1619 cb->u.tcb.threshold = nic->tx_threshold;
1620 cb->u.tcb.tbd_count = 1;
1621 cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
1622 skb->data, skb->len, PCI_DMA_TODEVICE));
1623 /* check for mapping failure? */
1624 cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
1627 static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1629 struct nic *nic = netdev_priv(netdev);
1630 int err;
1632 if(nic->flags & ich_10h_workaround) {
1633 /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
1634 Issue a NOP command followed by a 1us delay before
1635 issuing the Tx command. */
1636 if(e100_exec_cmd(nic, cuc_nop, 0))
1637 DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
1638 udelay(1);
1641 err = e100_exec_cb(nic, skb, e100_xmit_prepare);
1643 switch(err) {
1644 case -ENOSPC:
1645 /* We queued the skb, but now we're out of space. */
1646 DPRINTK(TX_ERR, DEBUG, "No space for CB\n");
1647 netif_stop_queue(netdev);
1648 break;
1649 case -ENOMEM:
1650 /* This is a hard error - log it. */
1651 DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
1652 netif_stop_queue(netdev);
1653 return 1;
1656 netdev->trans_start = jiffies;
1657 return 0;
1660 static int e100_tx_clean(struct nic *nic)
1662 struct cb *cb;
1663 int tx_cleaned = 0;
1665 spin_lock(&nic->cb_lock);
1667 /* Clean CBs marked complete */
1668 for(cb = nic->cb_to_clean;
1669 cb->status & cpu_to_le16(cb_complete);
1670 cb = nic->cb_to_clean = cb->next) {
1671 DPRINTK(TX_DONE, DEBUG, "cb[%d]->status = 0x%04X\n",
1672 (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
1673 cb->status);
1675 if(likely(cb->skb != NULL)) {
1676 nic->net_stats.tx_packets++;
1677 nic->net_stats.tx_bytes += cb->skb->len;
1679 pci_unmap_single(nic->pdev,
1680 le32_to_cpu(cb->u.tcb.tbd.buf_addr),
1681 le16_to_cpu(cb->u.tcb.tbd.size),
1682 PCI_DMA_TODEVICE);
1683 dev_kfree_skb_any(cb->skb);
1684 cb->skb = NULL;
1685 tx_cleaned = 1;
1687 cb->status = 0;
1688 nic->cbs_avail++;
1691 spin_unlock(&nic->cb_lock);
1693 /* Recover from running out of Tx resources in xmit_frame */
1694 if(unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
1695 netif_wake_queue(nic->netdev);
1697 return tx_cleaned;
1700 static void e100_clean_cbs(struct nic *nic)
1702 if(nic->cbs) {
1703 while(nic->cbs_avail != nic->params.cbs.count) {
1704 struct cb *cb = nic->cb_to_clean;
1705 if(cb->skb) {
1706 pci_unmap_single(nic->pdev,
1707 le32_to_cpu(cb->u.tcb.tbd.buf_addr),
1708 le16_to_cpu(cb->u.tcb.tbd.size),
1709 PCI_DMA_TODEVICE);
1710 dev_kfree_skb(cb->skb);
1712 nic->cb_to_clean = nic->cb_to_clean->next;
1713 nic->cbs_avail++;
1715 pci_free_consistent(nic->pdev,
1716 sizeof(struct cb) * nic->params.cbs.count,
1717 nic->cbs, nic->cbs_dma_addr);
1718 nic->cbs = NULL;
1719 nic->cbs_avail = 0;
1721 nic->cuc_cmd = cuc_start;
1722 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
1723 nic->cbs;
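/*
 * Allocate the CBL as a single coherent DMA block and link it into a
 * circular ring twice over: next/prev pointers for the driver, and the
 * little-endian link field (a bus address) that the hardware follows from
 * one command block to the next.
 */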
1726 static int e100_alloc_cbs(struct nic *nic)
1728 struct cb *cb;
1729 unsigned int i, count = nic->params.cbs.count;
1731 nic->cuc_cmd = cuc_start;
1732 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
1733 nic->cbs_avail = 0;
1735 nic->cbs = pci_alloc_consistent(nic->pdev,
1736 sizeof(struct cb) * count, &nic->cbs_dma_addr);
1737 if(!nic->cbs)
1738 return -ENOMEM;
1740 for(cb = nic->cbs, i = 0; i < count; cb++, i++) {
1741 cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
1742 cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
1744 cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
1745 cb->link = cpu_to_le32(nic->cbs_dma_addr +
1746 ((i+1) % count) * sizeof(struct cb));
1747 cb->skb = NULL;
1750 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
1751 nic->cbs_avail = count;
1753 return 0;
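/*
 * (Re)start the receive unit.  This is a no-op unless the RU is currently
 * suspended and the RFA has a posted buffer; rx == NULL means "start from
 * the head of the ring" and covers the init-time case.
 */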
1756 static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
1758 if(!nic->rxs) return;
1759 if(RU_SUSPENDED != nic->ru_running) return;
1761 /* handle init time starts */
1762 if(!rx) rx = nic->rxs;
1764 /* (Re)start RU if suspended or idle and RFA is non-NULL */
1765 if(rx->skb) {
1766 e100_exec_cmd(nic, ruc_start, rx->dma_addr);
1767 nic->ru_running = RU_RUNNING;
1771 #define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
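/*
 * Post one receive buffer: allocate an skb big enough for the RFD header
 * plus a maximum-size VLAN frame, copy the blank RFD template to its head,
 * map it bidirectionally (the hardware writes status and size back into
 * the RFD), and splice it onto the tail of the RFA by pointing the
 * previous RFD's link at it and clearing that RFD's EL (end-of-list) bit.
 */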
1772 static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
1774 if(!(rx->skb = netdev_alloc_skb(nic->netdev, RFD_BUF_LEN + NET_IP_ALIGN)))
1775 return -ENOMEM;
1777 /* Align, init, and map the RFD. */
1778 skb_reserve(rx->skb, NET_IP_ALIGN);
1779 skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
1780 rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
1781 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
1783 if(pci_dma_mapping_error(rx->dma_addr)) {
1784 dev_kfree_skb_any(rx->skb);
1785 rx->skb = NULL;
1786 rx->dma_addr = 0;
1787 return -ENOMEM;
1790 /* Link the RFD to end of RFA by linking previous RFD to
1791 * this one, and clearing EL bit of previous. */
1792 if(rx->prev->skb) {
1793 struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
1794 put_unaligned(cpu_to_le32(rx->dma_addr),
1795 (u32 *)&prev_rfd->link);
1796 wmb();
1797 prev_rfd->command &= ~cpu_to_le16(cb_el);
1798 pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
1799 sizeof(struct rfd), PCI_DMA_TODEVICE);
1802 return 0;
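/*
 * Indicate one received frame to the stack.  The RFD is synced for the CPU
 * before its complete bit is examined; -EAGAIN means the NAPI quota was
 * hit and -ENODATA means the hardware has not finished this buffer yet.
 * Frames with hardware errors are dropped, oversized frames are counted
 * and dropped, and everything else goes up via netif_receive_skb().
 */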
1805 static int e100_rx_indicate(struct nic *nic, struct rx *rx,
1806 unsigned int *work_done, unsigned int work_to_do)
1808 struct sk_buff *skb = rx->skb;
1809 struct rfd *rfd = (struct rfd *)skb->data;
1810 u16 rfd_status, actual_size;
1812 if(unlikely(work_done && *work_done >= work_to_do))
1813 return -EAGAIN;
1815 /* Need to sync before taking a peek at cb_complete bit */
1816 pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
1817 sizeof(struct rfd), PCI_DMA_FROMDEVICE);
1818 rfd_status = le16_to_cpu(rfd->status);
1820 DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);
1822 /* If data isn't ready, nothing to indicate */
1823 if(unlikely(!(rfd_status & cb_complete)))
1824 return -ENODATA;
1826 /* Get actual data size */
1827 actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
1828 if(unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
1829 actual_size = RFD_BUF_LEN - sizeof(struct rfd);
1831 /* Get data */
1832 pci_unmap_single(nic->pdev, rx->dma_addr,
1833 RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
1835 /* this allows for a fast restart without re-enabling interrupts */
1836 if(le16_to_cpu(rfd->command) & cb_el)
1837 nic->ru_running = RU_SUSPENDED;
1839 /* Pull off the RFD and put the actual data (minus eth hdr) */
1840 skb_reserve(skb, sizeof(struct rfd));
1841 skb_put(skb, actual_size);
1842 skb->protocol = eth_type_trans(skb, nic->netdev);
1844 if(unlikely(!(rfd_status & cb_ok))) {
1845 /* Don't indicate if hardware indicates errors */
1846 dev_kfree_skb_any(skb);
1847 } else if(actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
1848 /* Don't indicate oversized frames */
1849 nic->rx_over_length_errors++;
1850 dev_kfree_skb_any(skb);
1851 } else {
1852 nic->net_stats.rx_packets++;
1853 nic->net_stats.rx_bytes += actual_size;
1854 nic->netdev->last_rx = jiffies;
1855 netif_receive_skb(skb);
1856 if(work_done)
1857 (*work_done)++;
1860 rx->skb = NULL;
1862 return 0;
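/*
 * NAPI receive cleanup.  If the RU has already hit receive-no-resource,
 * remember that a restart is needed, indicate completed frames, refill the
 * ring with fresh skbs, and only then ack the RNR and restart the receiver
 * from the saved position; restarting before the refill would hand the
 * hardware a partially cleaned RFA.
 */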
1865 static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
1866 unsigned int work_to_do)
1868 struct rx *rx;
1869 int restart_required = 0;
1870 struct rx *rx_to_start = NULL;
1872 /* If the RU has already hit receive-no-resource, take note now: the
1873  * state machine must never restart the receiver with a partially
1874  * cleaned list, which would race the hardware against rx_to_clean
1875  * when running in NAPI mode */
1876 if(RU_SUSPENDED == nic->ru_running)
1877 restart_required = 1;
1879 /* Indicate newly arrived packets */
1880 for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
1881 int err = e100_rx_indicate(nic, rx, work_done, work_to_do);
1882 if(-EAGAIN == err) {
1883 /* hit the quota, so there is more work to do; defer the
1884  * restart until cleanup completes on a later poll pass */
1885 restart_required = 0;
1886 break;
1887 } else if(-ENODATA == err)
1888 break; /* No more to clean */
1891 /* save our starting point as the place we'll restart the receiver */
1892 if(restart_required)
1893 rx_to_start = nic->rx_to_clean;
1895 /* Alloc new skbs to refill list */
1896 for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
1897 if(unlikely(e100_rx_alloc_skb(nic, rx)))
1898 break; /* Better luck next time (see watchdog) */
1901 if(restart_required) {
1902 /* ack the RNR before restarting the receiver */
1903 writeb(stat_ack_rnr, &nic->csr->scb.stat_ack);
1904 e100_start_receiver(nic, rx_to_start);
1905 if(work_done)
1906 (*work_done)++;
1910 static void e100_rx_clean_list(struct nic *nic)
1912 struct rx *rx;
1913 unsigned int i, count = nic->params.rfds.count;
1915 nic->ru_running = RU_UNINITIALIZED;
1917 if(nic->rxs) {
1918 for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
1919 if(rx->skb) {
1920 pci_unmap_single(nic->pdev, rx->dma_addr,
1921 RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
1922 dev_kfree_skb(rx->skb);
1925 kfree(nic->rxs);
1926 nic->rxs = NULL;
1929 nic->rx_to_use = nic->rx_to_clean = NULL;
1932 static int e100_rx_alloc_list(struct nic *nic)
1934 struct rx *rx;
1935 unsigned int i, count = nic->params.rfds.count;
1937 nic->rx_to_use = nic->rx_to_clean = NULL;
1938 nic->ru_running = RU_UNINITIALIZED;
1940 if(!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
1941 return -ENOMEM;
1943 for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
1944 rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
1945 rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
1946 if(e100_rx_alloc_skb(nic, rx)) {
1947 e100_rx_clean_list(nic);
1948 return -ENOMEM;
1952 nic->rx_to_use = nic->rx_to_clean = nic->rxs;
1953 nic->ru_running = RU_SUSPENDED;
1955 return 0;
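/*
 * Interrupt handler: read the SCB stat/ack byte, bail out if the interrupt
 * is not ours (or the hardware is gone), acknowledge it, note an RNR
 * condition so the poll routine will restart the RU, then disable further
 * interrupts and schedule NAPI polling.  The actual Rx/Tx work is done in
 * e100_poll().
 */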
1958 static irqreturn_t e100_intr(int irq, void *dev_id)
1960 struct net_device *netdev = dev_id;
1961 struct nic *nic = netdev_priv(netdev);
1962 u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);
1964 DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);
1966 if(stat_ack == stat_ack_not_ours || /* Not our interrupt */
1967 stat_ack == stat_ack_not_present) /* Hardware is ejected */
1968 return IRQ_NONE;
1970 /* Ack interrupt(s) */
1971 iowrite8(stat_ack, &nic->csr->scb.stat_ack);
1973 /* We hit Receive No Resource (RNR); restart RU after cleaning */
1974 if(stat_ack & stat_ack_rnr)
1975 nic->ru_running = RU_SUSPENDED;
1977 if(likely(netif_rx_schedule_prep(netdev))) {
1978 e100_disable_irq(nic);
1979 __netif_rx_schedule(netdev);
1982 return IRQ_HANDLED;
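/*
 * NAPI poll routine: clean received frames up to the budget, reclaim
 * finished Tx CBs, and once no work remains complete the poll and
 * re-enable interrupts.  Returning 1 keeps the device on the poll list;
 * returning 0 removes it.
 */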
1985 static int e100_poll(struct net_device *netdev, int *budget)
1987 struct nic *nic = netdev_priv(netdev);
1988 unsigned int work_to_do = min(netdev->quota, *budget);
1989 unsigned int work_done = 0;
1990 int tx_cleaned;
1992 e100_rx_clean(nic, &work_done, work_to_do);
1993 tx_cleaned = e100_tx_clean(nic);
1995 /* If no Rx and Tx cleanup work was done, exit polling mode. */
1996 if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) {
1997 netif_rx_complete(netdev);
1998 e100_enable_irq(nic);
1999 return 0;
2002 *budget -= work_done;
2003 netdev->quota -= work_done;
2005 return 1;
2008 #ifdef CONFIG_NET_POLL_CONTROLLER
2009 static void e100_netpoll(struct net_device *netdev)
2011 struct nic *nic = netdev_priv(netdev);
2013 e100_disable_irq(nic);
2014 e100_intr(nic->pdev->irq, netdev);
2015 e100_tx_clean(nic);
2016 e100_enable_irq(nic);
2018 #endif
2020 static struct net_device_stats *e100_get_stats(struct net_device *netdev)
2022 struct nic *nic = netdev_priv(netdev);
2023 return &nic->net_stats;
2026 static int e100_set_mac_address(struct net_device *netdev, void *p)
2028 struct nic *nic = netdev_priv(netdev);
2029 struct sockaddr *addr = p;
2031 if (!is_valid_ether_addr(addr->sa_data))
2032 return -EADDRNOTAVAIL;
2034 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2035 e100_exec_cb(nic, NULL, e100_setup_iaaddr);
2037 return 0;
2040 static int e100_change_mtu(struct net_device *netdev, int new_mtu)
2042 if(new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
2043 return -EINVAL;
2044 netdev->mtu = new_mtu;
2045 return 0;
2048 static int e100_asf(struct nic *nic)
2050 /* ASF can be enabled from eeprom */
2051 return((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
2052 (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
2053 !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
2054 ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
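/*
 * Bring the interface up: allocate the receive ring and the CBL,
 * initialize the hardware, start the receiver and the watchdog, hook up
 * the shared interrupt, and enable the queue and NAPI polling before
 * unmasking interrupts.
 */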
2057 static int e100_up(struct nic *nic)
2059 int err;
2061 if((err = e100_rx_alloc_list(nic)))
2062 return err;
2063 if((err = e100_alloc_cbs(nic)))
2064 goto err_rx_clean_list;
2065 if((err = e100_hw_init(nic)))
2066 goto err_clean_cbs;
2067 e100_set_multicast_list(nic->netdev);
2068 e100_start_receiver(nic, NULL);
2069 mod_timer(&nic->watchdog, jiffies);
2070 if((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
2071 nic->netdev->name, nic->netdev)))
2072 goto err_no_irq;
2073 netif_wake_queue(nic->netdev);
2074 netif_poll_enable(nic->netdev);
2075 /* Enable interrupts only after enabling poll, so the ISR's
2076  * disable-interrupts-and-schedule path cannot race a disabled poll */
2077 e100_enable_irq(nic);
2078 return 0;
2080 err_no_irq:
2081 del_timer_sync(&nic->watchdog);
2082 err_clean_cbs:
2083 e100_clean_cbs(nic);
2084 err_rx_clean_list:
2085 e100_rx_clean_list(nic);
2086 return err;
2089 static void e100_down(struct nic *nic)
2091 /* wait here for poll to complete */
2092 netif_poll_disable(nic->netdev);
2093 netif_stop_queue(nic->netdev);
2094 e100_hw_reset(nic);
2095 free_irq(nic->pdev->irq, nic->netdev);
2096 del_timer_sync(&nic->watchdog);
2097 netif_carrier_off(nic->netdev);
2098 e100_clean_cbs(nic);
2099 e100_rx_clean_list(nic);
2102 static void e100_tx_timeout(struct net_device *netdev)
2104 struct nic *nic = netdev_priv(netdev);
2106 /* Reset outside of interrupt context, to avoid request_irq
2107 * in interrupt context */
2108 schedule_work(&nic->tx_timeout_task);
2111 static void e100_tx_timeout_task(struct work_struct *work)
2113 struct nic *nic = container_of(work, struct nic, tx_timeout_task);
2114 struct net_device *netdev = nic->netdev;
2116 DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
2117 ioread8(&nic->csr->scb.status));
2118 e100_down(netdev_priv(netdev));
2119 e100_up(netdev_priv(netdev));
2122 static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
2124 int err;
2125 struct sk_buff *skb;
2127 /* Use driver resources to perform internal MAC or PHY
2128 * loopback test. A single packet is prepared and transmitted
2129 * in loopback mode, and the test passes if the received
2130 * packet compares byte-for-byte to the transmitted packet. */
2132 if((err = e100_rx_alloc_list(nic)))
2133 return err;
2134 if((err = e100_alloc_cbs(nic)))
2135 goto err_clean_rx;
2137 /* ICH PHY loopback is broken so do MAC loopback instead */
2138 if(nic->flags & ich && loopback_mode == lb_phy)
2139 loopback_mode = lb_mac;
2141 nic->loopback = loopback_mode;
2142 if((err = e100_hw_init(nic)))
2143 goto err_loopback_none;
2145 if(loopback_mode == lb_phy)
2146 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
2147 BMCR_LOOPBACK);
2149 e100_start_receiver(nic, NULL);
2151 if(!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
2152 err = -ENOMEM;
2153 goto err_loopback_none;
2155 skb_put(skb, ETH_DATA_LEN);
2156 memset(skb->data, 0xFF, ETH_DATA_LEN);
2157 e100_xmit_frame(skb, nic->netdev);
2159 msleep(10);
2161 pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
2162 RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
2164 if(memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
2165 skb->data, ETH_DATA_LEN))
2166 err = -EAGAIN;
2168 err_loopback_none:
2169 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
2170 nic->loopback = lb_none;
2171 e100_clean_cbs(nic);
2172 e100_hw_reset(nic);
2173 err_clean_rx:
2174 e100_rx_clean_list(nic);
2175 return err;
2178 #define MII_LED_CONTROL 0x1B
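/*
 * Timer callback used by e100_phys_id(): toggles the LED via the PHY's
 * LED control register (0x1B); the "on" value differs between 82557- and
 * 82559-class MACs, and the timer re-arms itself every HZ/4 jiffies.
 */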
2179 static void e100_blink_led(unsigned long data)
2181 struct nic *nic = (struct nic *)data;
2182 enum led_state {
2183 led_on = 0x01,
2184 led_off = 0x04,
2185 led_on_559 = 0x05,
2186 led_on_557 = 0x07,
2189 nic->leds = (nic->leds & led_on) ? led_off :
2190 (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
2191 mdio_write(nic->netdev, nic->mii.phy_id, MII_LED_CONTROL, nic->leds);
2192 mod_timer(&nic->blink_timer, jiffies + HZ / 4);
2195 static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2197 struct nic *nic = netdev_priv(netdev);
2198 return mii_ethtool_gset(&nic->mii, cmd);
2201 static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2203 struct nic *nic = netdev_priv(netdev);
2204 int err;
2206 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
2207 err = mii_ethtool_sset(&nic->mii, cmd);
2208 e100_exec_cb(nic, NULL, e100_configure);
2210 return err;
2213 static void e100_get_drvinfo(struct net_device *netdev,
2214 struct ethtool_drvinfo *info)
2216 struct nic *nic = netdev_priv(netdev);
2217 strcpy(info->driver, DRV_NAME);
2218 strcpy(info->version, DRV_VERSION);
2219 strcpy(info->fw_version, "N/A");
2220 strcpy(info->bus_info, pci_name(nic->pdev));
2223 static int e100_get_regs_len(struct net_device *netdev)
2225 struct nic *nic = netdev_priv(netdev);
2226 #define E100_PHY_REGS 0x1C
2227 #define E100_REGS_LEN 1 + E100_PHY_REGS + \
2228 sizeof(nic->mem->dump_buf) / sizeof(u32)
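/* Note that E100_REGS_LEN expands unparenthesized, so the multiplication
 * below binds only to the macro's final sizeof()/sizeof() term rather
 * than to the whole sum */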
2229 return E100_REGS_LEN * sizeof(u32);
2232 static void e100_get_regs(struct net_device *netdev,
2233 struct ethtool_regs *regs, void *p)
2235 struct nic *nic = netdev_priv(netdev);
2236 u32 *buff = p;
2237 int i;
2239 regs->version = (1 << 24) | nic->pdev->revision;
2240 buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
2241 ioread8(&nic->csr->scb.cmd_lo) << 16 |
2242 ioread16(&nic->csr->scb.status);
2243 for(i = E100_PHY_REGS; i >= 0; i--)
2244 buff[1 + E100_PHY_REGS - i] =
2245 mdio_read(netdev, nic->mii.phy_id, i);
2246 memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
2247 e100_exec_cb(nic, NULL, e100_dump);
2248 msleep(10);
2249 memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
2250 sizeof(nic->mem->dump_buf));
2253 static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2255 struct nic *nic = netdev_priv(netdev);
2256 wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
2257 wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
2260 static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2262 struct nic *nic = netdev_priv(netdev);
2264 if(wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
2265 return -EOPNOTSUPP;
2267 if(wol->wolopts)
2268 nic->flags |= wol_magic;
2269 else
2270 nic->flags &= ~wol_magic;
2272 e100_exec_cb(nic, NULL, e100_configure);
2274 return 0;
2277 static u32 e100_get_msglevel(struct net_device *netdev)
2279 struct nic *nic = netdev_priv(netdev);
2280 return nic->msg_enable;
2283 static void e100_set_msglevel(struct net_device *netdev, u32 value)
2285 struct nic *nic = netdev_priv(netdev);
2286 nic->msg_enable = value;
2289 static int e100_nway_reset(struct net_device *netdev)
2291 struct nic *nic = netdev_priv(netdev);
2292 return mii_nway_restart(&nic->mii);
2295 static u32 e100_get_link(struct net_device *netdev)
2297 struct nic *nic = netdev_priv(netdev);
2298 return mii_link_ok(&nic->mii);
2301 static int e100_get_eeprom_len(struct net_device *netdev)
2303 struct nic *nic = netdev_priv(netdev);
2304 return nic->eeprom_wc << 1;
2307 #define E100_EEPROM_MAGIC 0x1234
2308 static int e100_get_eeprom(struct net_device *netdev,
2309 struct ethtool_eeprom *eeprom, u8 *bytes)
2311 struct nic *nic = netdev_priv(netdev);
2313 eeprom->magic = E100_EEPROM_MAGIC;
2314 memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);
2316 return 0;
2319 static int e100_set_eeprom(struct net_device *netdev,
2320 struct ethtool_eeprom *eeprom, u8 *bytes)
2322 struct nic *nic = netdev_priv(netdev);
2324 if(eeprom->magic != E100_EEPROM_MAGIC)
2325 return -EINVAL;
2327 memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);
2329 return e100_eeprom_save(nic, eeprom->offset >> 1,
2330 (eeprom->len >> 1) + 1);
2333 static void e100_get_ringparam(struct net_device *netdev,
2334 struct ethtool_ringparam *ring)
2336 struct nic *nic = netdev_priv(netdev);
2337 struct param_range *rfds = &nic->params.rfds;
2338 struct param_range *cbs = &nic->params.cbs;
2340 ring->rx_max_pending = rfds->max;
2341 ring->tx_max_pending = cbs->max;
2342 ring->rx_mini_max_pending = 0;
2343 ring->rx_jumbo_max_pending = 0;
2344 ring->rx_pending = rfds->count;
2345 ring->tx_pending = cbs->count;
2346 ring->rx_mini_pending = 0;
2347 ring->rx_jumbo_pending = 0;
2350 static int e100_set_ringparam(struct net_device *netdev,
2351 struct ethtool_ringparam *ring)
2353 struct nic *nic = netdev_priv(netdev);
2354 struct param_range *rfds = &nic->params.rfds;
2355 struct param_range *cbs = &nic->params.cbs;
2357 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
2358 return -EINVAL;
2360 if(netif_running(netdev))
2361 e100_down(nic);
2362 rfds->count = max(ring->rx_pending, rfds->min);
2363 rfds->count = min(rfds->count, rfds->max);
2364 cbs->count = max(ring->tx_pending, cbs->min);
2365 cbs->count = min(cbs->count, cbs->max);
2366 DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n",
2367 rfds->count, cbs->count);
2368 if(netif_running(netdev))
2369 e100_up(nic);
2371 return 0;
2374 static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
2375 "Link test (on/offline)",
2376 "Eeprom test (on/offline)",
2377 "Self test (offline)",
2378 "Mac loopback (offline)",
2379 "Phy loopback (offline)",
2381 #define E100_TEST_LEN sizeof(e100_gstrings_test) / ETH_GSTRING_LEN
2383 static int e100_diag_test_count(struct net_device *netdev)
2385 return E100_TEST_LEN;
2388 static void e100_diag_test(struct net_device *netdev,
2389 struct ethtool_test *test, u64 *data)
2391 struct ethtool_cmd cmd;
2392 struct nic *nic = netdev_priv(netdev);
2393 int i, err;
2395 memset(data, 0, E100_TEST_LEN * sizeof(u64));
2396 data[0] = !mii_link_ok(&nic->mii);
2397 data[1] = e100_eeprom_load(nic);
2398 if(test->flags & ETH_TEST_FL_OFFLINE) {
2400 /* save speed, duplex & autoneg settings */
2401 err = mii_ethtool_gset(&nic->mii, &cmd);
2403 if(netif_running(netdev))
2404 e100_down(nic);
2405 data[2] = e100_self_test(nic);
2406 data[3] = e100_loopback_test(nic, lb_mac);
2407 data[4] = e100_loopback_test(nic, lb_phy);
2409 /* restore speed, duplex & autoneg settings */
2410 err = mii_ethtool_sset(&nic->mii, &cmd);
2412 if(netif_running(netdev))
2413 e100_up(nic);
2415 for(i = 0; i < E100_TEST_LEN; i++)
2416 test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;
2418 msleep_interruptible(4 * 1000);
2421 static int e100_phys_id(struct net_device *netdev, u32 data)
2423 struct nic *nic = netdev_priv(netdev);
2425 if(!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
2426 data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
2427 mod_timer(&nic->blink_timer, jiffies);
2428 msleep_interruptible(data * 1000);
2429 del_timer_sync(&nic->blink_timer);
2430 mdio_write(netdev, nic->mii.phy_id, MII_LED_CONTROL, 0);
2432 return 0;
2435 static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
2436 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
2437 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
2438 "rx_length_errors", "rx_over_errors", "rx_crc_errors",
2439 "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
2440 "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
2441 "tx_heartbeat_errors", "tx_window_errors",
2442 /* device-specific stats */
2443 "tx_deferred", "tx_single_collisions", "tx_multi_collisions",
2444 "tx_flow_control_pause", "rx_flow_control_pause",
2445 "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
2447 #define E100_NET_STATS_LEN 21
2448 #define E100_STATS_LEN sizeof(e100_gstrings_stats) / ETH_GSTRING_LEN
2450 static int e100_get_stats_count(struct net_device *netdev)
2452 return E100_STATS_LEN;
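/*
 * The first E100_NET_STATS_LEN values are copied straight out of
 * nic->net_stats; this assumes struct net_device_stats begins with that
 * many unsigned long counters in the order listed in e100_gstrings_stats.
 * The device-specific counters follow.
 */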
2455 static void e100_get_ethtool_stats(struct net_device *netdev,
2456 struct ethtool_stats *stats, u64 *data)
2458 struct nic *nic = netdev_priv(netdev);
2459 int i;
2461 for(i = 0; i < E100_NET_STATS_LEN; i++)
2462 data[i] = ((unsigned long *)&nic->net_stats)[i];
2464 data[i++] = nic->tx_deferred;
2465 data[i++] = nic->tx_single_collisions;
2466 data[i++] = nic->tx_multiple_collisions;
2467 data[i++] = nic->tx_fc_pause;
2468 data[i++] = nic->rx_fc_pause;
2469 data[i++] = nic->rx_fc_unsupported;
2470 data[i++] = nic->tx_tco_frames;
2471 data[i++] = nic->rx_tco_frames;
2474 static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2476 switch(stringset) {
2477 case ETH_SS_TEST:
2478 memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
2479 break;
2480 case ETH_SS_STATS:
2481 memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
2482 break;
2486 static const struct ethtool_ops e100_ethtool_ops = {
2487 .get_settings = e100_get_settings,
2488 .set_settings = e100_set_settings,
2489 .get_drvinfo = e100_get_drvinfo,
2490 .get_regs_len = e100_get_regs_len,
2491 .get_regs = e100_get_regs,
2492 .get_wol = e100_get_wol,
2493 .set_wol = e100_set_wol,
2494 .get_msglevel = e100_get_msglevel,
2495 .set_msglevel = e100_set_msglevel,
2496 .nway_reset = e100_nway_reset,
2497 .get_link = e100_get_link,
2498 .get_eeprom_len = e100_get_eeprom_len,
2499 .get_eeprom = e100_get_eeprom,
2500 .set_eeprom = e100_set_eeprom,
2501 .get_ringparam = e100_get_ringparam,
2502 .set_ringparam = e100_set_ringparam,
2503 .self_test_count = e100_diag_test_count,
2504 .self_test = e100_diag_test,
2505 .get_strings = e100_get_strings,
2506 .phys_id = e100_phys_id,
2507 .get_stats_count = e100_get_stats_count,
2508 .get_ethtool_stats = e100_get_ethtool_stats,
2511 static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2513 struct nic *nic = netdev_priv(netdev);
2515 return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
2518 static int e100_alloc(struct nic *nic)
2520 nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
2521 &nic->dma_addr);
2522 return nic->mem ? 0 : -ENOMEM;
2525 static void e100_free(struct nic *nic)
2527 if(nic->mem) {
2528 pci_free_consistent(nic->pdev, sizeof(struct mem),
2529 nic->mem, nic->dma_addr);
2530 nic->mem = NULL;
2534 static int e100_open(struct net_device *netdev)
2536 struct nic *nic = netdev_priv(netdev);
2537 int err = 0;
2539 netif_carrier_off(netdev);
2540 if((err = e100_up(nic)))
2541 DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n");
2542 return err;
2545 static int e100_close(struct net_device *netdev)
2547 e100_down(netdev_priv(netdev));
2548 return 0;
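/*
 * PCI probe: allocate the net_device and wire up its entry points, map the
 * CSR (memory-mapped by default, I/O space when use_io is set), reset the
 * hardware before enabling bus mastering, load the EEPROM for the MAC
 * address and WoL default, and finally register the netdev as eth%d.
 */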
2551 static int __devinit e100_probe(struct pci_dev *pdev,
2552 const struct pci_device_id *ent)
2554 struct net_device *netdev;
2555 struct nic *nic;
2556 int err;
2558 if(!(netdev = alloc_etherdev(sizeof(struct nic)))) {
2559 if(((1 << debug) - 1) & NETIF_MSG_PROBE)
2560 printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n");
2561 return -ENOMEM;
2564 netdev->open = e100_open;
2565 netdev->stop = e100_close;
2566 netdev->hard_start_xmit = e100_xmit_frame;
2567 netdev->get_stats = e100_get_stats;
2568 netdev->set_multicast_list = e100_set_multicast_list;
2569 netdev->set_mac_address = e100_set_mac_address;
2570 netdev->change_mtu = e100_change_mtu;
2571 netdev->do_ioctl = e100_do_ioctl;
2572 SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
2573 netdev->tx_timeout = e100_tx_timeout;
2574 netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
2575 netdev->poll = e100_poll;
2576 netdev->weight = E100_NAPI_WEIGHT;
2577 #ifdef CONFIG_NET_POLL_CONTROLLER
2578 netdev->poll_controller = e100_netpoll;
2579 #endif
2580 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
2582 nic = netdev_priv(netdev);
2583 nic->netdev = netdev;
2584 nic->pdev = pdev;
2585 nic->msg_enable = (1 << debug) - 1;
2586 pci_set_drvdata(pdev, netdev);
2588 if((err = pci_enable_device(pdev))) {
2589 DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n");
2590 goto err_out_free_dev;
2593 if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2594 DPRINTK(PROBE, ERR, "Cannot find proper PCI device "
2595 "base address, aborting.\n");
2596 err = -ENODEV;
2597 goto err_out_disable_pdev;
2600 if((err = pci_request_regions(pdev, DRV_NAME))) {
2601 DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n");
2602 goto err_out_disable_pdev;
2605 if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
2606 DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n");
2607 goto err_out_free_res;
2610 SET_MODULE_OWNER(netdev);
2611 SET_NETDEV_DEV(netdev, &pdev->dev);
2613 if (use_io)
2614 DPRINTK(PROBE, INFO, "using i/o access mode\n");
2616 nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
2617 if(!nic->csr) {
2618 DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");
2619 err = -ENOMEM;
2620 goto err_out_free_res;
2623 if(ent->driver_data)
2624 nic->flags |= ich;
2625 else
2626 nic->flags &= ~ich;
2628 e100_get_defaults(nic);
2630 /* locks must be initialized before calling hw_reset */
2631 spin_lock_init(&nic->cb_lock);
2632 spin_lock_init(&nic->cmd_lock);
2633 spin_lock_init(&nic->mdio_lock);
2635 /* Reset the device before pci_set_master() in case the device is in
2636  * some funky state with an interrupt pending - note that the interrupt
2637  * handler is not registered yet. */
2638 e100_hw_reset(nic);
2640 pci_set_master(pdev);
2642 init_timer(&nic->watchdog);
2643 nic->watchdog.function = e100_watchdog;
2644 nic->watchdog.data = (unsigned long)nic;
2645 init_timer(&nic->blink_timer);
2646 nic->blink_timer.function = e100_blink_led;
2647 nic->blink_timer.data = (unsigned long)nic;
2649 INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
2651 if((err = e100_alloc(nic))) {
2652 DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
2653 goto err_out_iounmap;
2656 if((err = e100_eeprom_load(nic)))
2657 goto err_out_free;
2659 e100_phy_init(nic);
2661 memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
2662 memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
2663 if (!is_valid_ether_addr(netdev->perm_addr)) {
2664 if (!eeprom_bad_csum_allow) {
2665 DPRINTK(PROBE, ERR, "Invalid MAC address from "
2666 "EEPROM, aborting.\n");
2667 err = -EAGAIN;
2668 goto err_out_free;
2669 } else {
2670 DPRINTK(PROBE, ERR, "Invalid MAC address from EEPROM, "
2671 "you MUST configure one.\n");
2675 /* WoL magic packet can be enabled from eeprom */
2676 if((nic->mac >= mac_82558_D101_A4) &&
2677 (nic->eeprom[eeprom_id] & eeprom_id_wol))
2678 nic->flags |= wol_magic;
2680 /* ack any pending wake events, disable PME */
2681 err = pci_enable_wake(pdev, 0, 0);
2682 if (err)
2683 DPRINTK(PROBE, ERR, "Error clearing wake event\n");
2685 strcpy(netdev->name, "eth%d");
2686 if((err = register_netdev(netdev))) {
2687 DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
2688 goto err_out_free;
2691 DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, "
2692 "MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
2693 (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0), pdev->irq,
2694 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
2695 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
2697 return 0;
2699 err_out_free:
2700 e100_free(nic);
2701 err_out_iounmap:
2702 pci_iounmap(pdev, nic->csr);
2703 err_out_free_res:
2704 pci_release_regions(pdev);
2705 err_out_disable_pdev:
2706 pci_disable_device(pdev);
2707 err_out_free_dev:
2708 pci_set_drvdata(pdev, NULL);
2709 free_netdev(netdev);
2710 return err;
2713 static void __devexit e100_remove(struct pci_dev *pdev)
2715 struct net_device *netdev = pci_get_drvdata(pdev);
2717 if(netdev) {
2718 struct nic *nic = netdev_priv(netdev);
2719 unregister_netdev(netdev);
2720 e100_free(nic);
2721 iounmap(nic->csr);
2722 free_netdev(netdev);
2723 pci_release_regions(pdev);
2724 pci_disable_device(pdev);
2725 pci_set_drvdata(pdev, NULL);
2729 #ifdef CONFIG_PM
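/*
 * Suspend: quiesce polling and the watchdog, detach the netdev, then arm
 * PME for D3hot/D3cold if either Wake-on-LAN magic packets or ASF are
 * enabled, and put the device into D3hot.
 */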
2730 static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
2732 struct net_device *netdev = pci_get_drvdata(pdev);
2733 struct nic *nic = netdev_priv(netdev);
2735 if (netif_running(netdev))
2736 netif_poll_disable(nic->netdev);
2737 del_timer_sync(&nic->watchdog);
2738 netif_carrier_off(nic->netdev);
2739 netif_device_detach(netdev);
2741 pci_save_state(pdev);
2743 if ((nic->flags & wol_magic) | e100_asf(nic)) {
2744 pci_enable_wake(pdev, PCI_D3hot, 1);
2745 pci_enable_wake(pdev, PCI_D3cold, 1);
2746 } else {
2747 pci_enable_wake(pdev, PCI_D3hot, 0);
2748 pci_enable_wake(pdev, PCI_D3cold, 0);
2751 pci_disable_device(pdev);
2752 free_irq(pdev->irq, netdev);
2753 pci_set_power_state(pdev, PCI_D3hot);
2755 return 0;
2758 static int e100_resume(struct pci_dev *pdev)
2760 struct net_device *netdev = pci_get_drvdata(pdev);
2761 struct nic *nic = netdev_priv(netdev);
2763 pci_set_power_state(pdev, PCI_D0);
2764 pci_restore_state(pdev);
2765 /* ack any pending wake events, disable PME */
2766 pci_enable_wake(pdev, 0, 0);
2768 netif_device_attach(netdev);
2769 if (netif_running(netdev))
2770 e100_up(nic);
2772 return 0;
2774 #endif /* CONFIG_PM */
2776 static void e100_shutdown(struct pci_dev *pdev)
2778 struct net_device *netdev = pci_get_drvdata(pdev);
2779 struct nic *nic = netdev_priv(netdev);
2781 if (netif_running(netdev))
2782 netif_poll_disable(nic->netdev);
2783 del_timer_sync(&nic->watchdog);
2784 netif_carrier_off(nic->netdev);
2786 if ((nic->flags & wol_magic) | e100_asf(nic)) {
2787 pci_enable_wake(pdev, PCI_D3hot, 1);
2788 pci_enable_wake(pdev, PCI_D3cold, 1);
2789 } else {
2790 pci_enable_wake(pdev, PCI_D3hot, 0);
2791 pci_enable_wake(pdev, PCI_D3cold, 0);
2794 pci_disable_device(pdev);
2795 pci_set_power_state(pdev, PCI_D3hot);
2798 /* ------------------ PCI Error Recovery infrastructure -------------- */
2800 * e100_io_error_detected - called when PCI error is detected.
2801 * @pdev: Pointer to PCI device
2802 * @state: The current pci connection state
2804 static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
2806 struct net_device *netdev = pci_get_drvdata(pdev);
2808 /* Similar to calling e100_down(), but avoids adapter I/O. */
2809 netdev->stop(netdev);
2811 /* Detach; put netif into state similar to hotplug unplug. */
2812 netif_poll_enable(netdev);
2813 netif_device_detach(netdev);
2814 pci_disable_device(pdev);
2816 /* Request a slot reset. */
2817 return PCI_ERS_RESULT_NEED_RESET;
2821 * e100_io_slot_reset - called after the pci bus has been reset.
2822 * @pdev: Pointer to PCI device
2824 * Restart the card from scratch.
2826 static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
2828 struct net_device *netdev = pci_get_drvdata(pdev);
2829 struct nic *nic = netdev_priv(netdev);
2831 if (pci_enable_device(pdev)) {
2832 printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n");
2833 return PCI_ERS_RESULT_DISCONNECT;
2835 pci_set_master(pdev);
2837 /* Only one device per card can do a reset */
2838 if (0 != PCI_FUNC(pdev->devfn))
2839 return PCI_ERS_RESULT_RECOVERED;
2840 e100_hw_reset(nic);
2841 e100_phy_init(nic);
2843 return PCI_ERS_RESULT_RECOVERED;
2847 * e100_io_resume - resume normal operations
2848 * @pdev: Pointer to PCI device
2850 * Resume normal operations after an error recovery
2851 * sequence has been completed.
2853 static void e100_io_resume(struct pci_dev *pdev)
2855 struct net_device *netdev = pci_get_drvdata(pdev);
2856 struct nic *nic = netdev_priv(netdev);
2858 /* ack any pending wake events, disable PME */
2859 pci_enable_wake(pdev, 0, 0);
2861 netif_device_attach(netdev);
2862 if (netif_running(netdev)) {
2863 e100_open(netdev);
2864 mod_timer(&nic->watchdog, jiffies);
2868 static struct pci_error_handlers e100_err_handler = {
2869 .error_detected = e100_io_error_detected,
2870 .slot_reset = e100_io_slot_reset,
2871 .resume = e100_io_resume,
2874 static struct pci_driver e100_driver = {
2875 .name = DRV_NAME,
2876 .id_table = e100_id_table,
2877 .probe = e100_probe,
2878 .remove = __devexit_p(e100_remove),
2879 #ifdef CONFIG_PM
2880 /* Power Management hooks */
2881 .suspend = e100_suspend,
2882 .resume = e100_resume,
2883 #endif
2884 .shutdown = e100_shutdown,
2885 .err_handler = &e100_err_handler,
2888 static int __init e100_init_module(void)
2890 if(((1 << debug) - 1) & NETIF_MSG_DRV) {
2891 printk(KERN_INFO PFX "%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
2892 printk(KERN_INFO PFX "%s\n", DRV_COPYRIGHT);
2894 return pci_register_driver(&e100_driver);
2897 static void __exit e100_cleanup_module(void)
2899 pci_unregister_driver(&e100_driver);
2902 module_init(e100_init_module);
2903 module_exit(e100_cleanup_module);