1 /* drivers/net/eepro100.c: An Intel i82557-559 Ethernet driver for Linux. */
2 /*
3 Written 1996-1999 by Donald Becker.
5 The driver also contains updates by different kernel developers
6 (see incomplete list below).
7 Current maintainer is Andrey V. Savochkin <saw@saw.sw.com.sg>.
8 Please use this email address and linux-kernel mailing list for bug reports.
10 This software may be used and distributed according to the terms
11 of the GNU General Public License, incorporated herein by reference.
13 This driver is for the Intel EtherExpress Pro100 (Speedo3) design.
14 It should work with all i82557/558/559 boards.
16 Version history:
17 1998 Apr - 2000 Feb Andrey V. Savochkin <saw@saw.sw.com.sg>
18 Serious fixes for multicast filter list setting, TX timeout routine;
19 RX ring refilling logic; other stuff
20 2000 Feb Jeff Garzik <jgarzik@pobox.com>
21 Convert to new PCI driver interface
22 2000 Mar 24 Dragan Stancevic <visitor@valinux.com>
	Disabled FC and ER, to avoid lockups when we get FCP interrupts.
24 2000 Jul 17 Goutham Rao <goutham.rao@intel.com>
	PCI DMA API fixes, adding pci_dma_sync_single calls where necessary
26 2000 Aug 31 David Mosberger <davidm@hpl.hp.com>
27 rx_align support: enables rx DMA without causing unaligned accesses.
*/

static const char * const version =
31 "eepro100.c:v1.09j-t 9/29/99 Donald Becker http://www.scyld.com/network/eepro100.html\n"
32 "eepro100.c: $Revision: 1.36 $ 2000/11/17 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n";
34 /* A few user-configurable values that apply to all boards.
35 First set is undocumented and spelled per Intel recommendations. */
37 static int congenb /* = 0 */; /* Enable congestion control in the DP83840. */
38 static int txfifo = 8; /* Tx FIFO threshold in 4 byte units, 0-15 */
39 static int rxfifo = 8; /* Rx FIFO threshold, default 32 bytes. */
40 /* Tx/Rx DMA burst length, 0-127, 0 == no preemption, tx==128 -> disabled. */
41 static int txdmacount = 128;
42 static int rxdmacount /* = 0 */;
44 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) || defined(__mips__) || \
45 defined(__arm__)
46 /* align rx buffers to 2 bytes so that IP header is aligned */
47 # define rx_align(skb) skb_reserve((skb), 2)
48 # define RxFD_ALIGNMENT __attribute__ ((aligned (2), packed))
49 #else
50 # define rx_align(skb)
51 # define RxFD_ALIGNMENT
52 #endif
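/* Why the 2-byte reserve helps: the 14-byte Ethernet header would leave a
   word-aligned buffer with the IP header on a 2-byte boundary; shifting the
   buffer start by 2 restores 4-byte alignment for the IP stack on the
   architectures listed above, which trap or pay a penalty on unaligned
   loads.  The RxFD that precedes the data then sits on a 2-byte boundary,
   hence the packed, 2-byte-aligned attribute. */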
54 /* Set the copy breakpoint for the copy-only-tiny-buffer Rx method.
55 Lower values use more memory, but are faster. */
56 static int rx_copybreak = 200;
58 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
59 static int max_interrupt_work = 20;
61 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast) */
62 static int multicast_filter_limit = 64;
64 /* 'options' is used to pass a transceiver override or full-duplex flag
65 e.g. "options=16" for FD, "options=32" for 100mbps-only. */
66 static int full_duplex[] = {-1, -1, -1, -1, -1, -1, -1, -1};
67 static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1};
69 /* A few values that may be tweaked. */
70 /* The ring sizes should be a power of two for efficiency. */
71 #define TX_RING_SIZE 64
72 #define RX_RING_SIZE 64
/* How many slots the multicast filter setup may take.
   Do not decrease without changing the set_rx_mode() implementation. */
75 #define TX_MULTICAST_SIZE 2
76 #define TX_MULTICAST_RESERV (TX_MULTICAST_SIZE*2)
77 /* Actual number of TX packets queued, must be
78 <= TX_RING_SIZE-TX_MULTICAST_RESERV. */
79 #define TX_QUEUE_LIMIT (TX_RING_SIZE-TX_MULTICAST_RESERV)
80 /* Hysteresis marking queue as no longer full. */
81 #define TX_QUEUE_UNFULL (TX_QUEUE_LIMIT-4)
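/* With the defaults above: TX_QUEUE_LIMIT = 64 - 2*2 = 60 entries may hold
   real packets, and the queue is woken again once it drains below
   TX_QUEUE_UNFULL = 56. */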
83 /* Operational parameters that usually are not changed. */
85 /* Time in jiffies before concluding the transmitter is hung. */
86 #define TX_TIMEOUT (2*HZ)
/* Size of a pre-allocated Rx buffer: <Ethernet MTU> + slack. */
88 #define PKT_BUF_SZ 1536
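/* For reference: a maximal Ethernet frame is 1514 bytes without the FCS
   (1500-byte MTU + 14-byte header), or 1518 with it; 1536 (0x600) is
   presumably that rounded up to an allocation-friendly size. */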
90 #include <linux/config.h>
91 #include <linux/module.h>
93 #include <linux/kernel.h>
94 #include <linux/string.h>
95 #include <linux/errno.h>
96 #include <linux/ioport.h>
97 #include <linux/slab.h>
98 #include <linux/interrupt.h>
99 #include <linux/timer.h>
100 #include <linux/pci.h>
101 #include <linux/spinlock.h>
102 #include <linux/init.h>
103 #include <linux/mii.h>
104 #include <linux/delay.h>
105 #include <linux/bitops.h>
107 #include <asm/io.h>
108 #include <asm/uaccess.h>
109 #include <asm/irq.h>
111 #include <linux/netdevice.h>
112 #include <linux/etherdevice.h>
113 #include <linux/rtnetlink.h>
114 #include <linux/skbuff.h>
115 #include <linux/ethtool.h>
117 static int use_io;
118 static int debug = -1;
119 #define DEBUG_DEFAULT (NETIF_MSG_DRV | \
120 NETIF_MSG_HW | \
121 NETIF_MSG_RX_ERR | \
122 NETIF_MSG_TX_ERR)
123 #define DEBUG ((debug >= 0) ? (1<<debug)-1 : DEBUG_DEFAULT)
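/* Example: debug=3 yields (1<<3)-1 = 0x7, i.e. NETIF_MSG_DRV |
   NETIF_MSG_PROBE | NETIF_MSG_LINK; the default debug=-1 keeps
   DEBUG_DEFAULT. */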
126 MODULE_AUTHOR("Maintainer: Andrey V. Savochkin <saw@saw.sw.com.sg>");
127 MODULE_DESCRIPTION("Intel i82557/i82558/i82559 PCI EtherExpressPro driver");
128 MODULE_LICENSE("GPL");
129 module_param(use_io, int, 0);
130 module_param(debug, int, 0);
131 module_param_array(options, int, NULL, 0);
132 module_param_array(full_duplex, int, NULL, 0);
133 module_param(congenb, int, 0);
134 module_param(txfifo, int, 0);
135 module_param(rxfifo, int, 0);
136 module_param(txdmacount, int, 0);
137 module_param(rxdmacount, int, 0);
138 module_param(rx_copybreak, int, 0);
139 module_param(max_interrupt_work, int, 0);
140 module_param(multicast_filter_limit, int, 0);
141 MODULE_PARM_DESC(debug, "debug level (0-6)");
142 MODULE_PARM_DESC(options, "Bits 0-3: transceiver type, bit 4: full duplex, bit 5: 100Mbps");
143 MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)");
144 MODULE_PARM_DESC(congenb, "Enable congestion control (1)");
145 MODULE_PARM_DESC(txfifo, "Tx FIFO threshold in 4 byte units, (0-15)");
146 MODULE_PARM_DESC(rxfifo, "Rx FIFO threshold in 4 byte units, (0-15)");
147 MODULE_PARM_DESC(txdmacount, "Tx DMA burst length; 128 - disable (0-128)");
148 MODULE_PARM_DESC(rxdmacount, "Rx DMA burst length; 128 - disable (0-128)");
149 MODULE_PARM_DESC(rx_copybreak, "copy breakpoint for copy-only-tiny-frames");
150 MODULE_PARM_DESC(max_interrupt_work, "maximum events handled per interrupt");
151 MODULE_PARM_DESC(multicast_filter_limit, "maximum number of filtered multicast addresses");
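/* Example invocation (illustrative values only):
 *	modprobe eepro100 options=0x30,0x30 debug=5
 * forces the first two cards to 100 Mbps full duplex (bit 5 | bit 4 of
 * 'options') and raises the message level to (1<<5)-1. */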
153 #define RUN_AT(x) (jiffies + (x))
155 #define netdevice_start(dev)
156 #define netdevice_stop(dev)
157 #define netif_set_tx_timeout(dev, tf, tm) \
158 do { \
159 (dev)->tx_timeout = (tf); \
160 (dev)->watchdog_timeo = (tm); \
161 } while(0)
/*
				Theory of Operation
168 I. Board Compatibility
170 This device driver is designed for the Intel i82557 "Speedo3" chip, Intel's
171 single-chip fast Ethernet controller for PCI, as used on the Intel
172 EtherExpress Pro 100 adapter.
174 II. Board-specific settings
176 PCI bus devices are configured by the system at boot time, so no jumpers
177 need to be set on the board. The system BIOS should be set to assign the
178 PCI INTA signal to an otherwise unused system IRQ line. While it's
179 possible to share PCI interrupt lines, it negatively impacts performance and
180 only recent kernels support it.
182 III. Driver operation
184 IIIA. General
The Speedo3 is very similar to other Intel network chips, that is to say
"apparently designed on a different planet".  This chip retains the complex
Rx and Tx descriptors and multiple buffer pointers as previous chips, but
188 also has simplified Tx and Rx buffer modes. This driver uses the "flexible"
189 Tx mode, but in a simplified lower-overhead manner: it associates only a
190 single buffer descriptor with each frame descriptor.
192 Despite the extra space overhead in each receive skbuff, the driver must use
193 the simplified Rx buffer mode to assure that only a single data buffer is
194 associated with each RxFD. The driver implements this by reserving space
195 for the Rx descriptor at the head of each Rx skbuff.
197 The Speedo-3 has receive and command unit base addresses that are added to
198 almost all descriptor pointers. The driver sets these to zero, so that all
199 pointer fields are absolute addresses.
201 The System Control Block (SCB) of some previous Intel chips exists on the
202 chip in both PCI I/O and memory space. This driver uses the I/O space
203 registers, but might switch to memory mapped mode to better support non-x86
204 processors.
206 IIIB. Transmit structure
The driver must use the complex Tx command+descriptor mode in order to
have an indirect pointer to the skbuff data section.  Each Tx command block
(TxCB) is associated with two immediately appended Tx Buffer Descriptors
(TxBDs).  A fixed ring of these TxCB+TxBD pairs is kept as part of the
212 speedo_private data structure for each adapter instance.
214 The newer i82558 explicitly supports this structure, and can read the two
215 TxBDs in the same PCI burst as the TxCB.
217 This ring structure is used for all normal transmit packets, but the
218 transmit packet descriptors aren't long enough for most non-Tx commands such
219 as CmdConfigure. This is complicated by the possibility that the chip has
220 already loaded the link address in the previous descriptor. So for these
221 commands we convert the next free descriptor on the ring to a NoOp, and point
222 that descriptor's link to the complex command.
An additional complexity of these non-transmit commands is that they may be
added asynchronously to the normal transmit queue, so we disable interrupts
226 whenever the Tx descriptor ring is manipulated.
228 A notable aspect of these special configure commands is that they do
229 work with the normal Tx ring entry scavenge method. The Tx ring scavenge
230 is done at interrupt time using the 'dirty_tx' index, and checking for the
command-complete bit.  The setup frames may have the NoOp command on the
Tx ring marked as complete while the setup command itself has not yet
finished, but this is not a problem: the tx_ring entry can still be safely
reused, as the tx_skbuff[] entry is always empty for config_cmd and
mc_setup frames.
236 Commands may have bits set e.g. CmdSuspend in the command word to either
237 suspend or stop the transmit/command unit. This driver always flags the last
238 command with CmdSuspend, erases the CmdSuspend in the previous command, and
239 then issues a CU_RESUME.
Note: Watch out for the potential race condition here: imagine
	erasing the previous suspend
		the chip processes the previous command
		the chip processes the final command, and suspends
	doing the CU_RESUME
		the chip processes the next, not-yet-valid post-final-command.
So blindly sending a CU_RESUME is only safe if we do it immediately after
erasing the previous CmdSuspend, without the possibility of an
248 intervening delay. Thus the resume command is always within the
249 interrupts-disabled region. This is a timing dependence, but handling this
250 condition in a timing-independent way would considerably complicate the code.
252 Note: In previous generation Intel chips, restarting the command unit was a
253 notoriously slow process. This is presumably no longer true.
255 IIIC. Receive structure
257 Because of the bus-master support on the Speedo3 this driver uses the new
258 SKBUFF_RX_COPYBREAK scheme, rather than a fixed intermediate receive buffer.
259 This scheme allocates full-sized skbuffs as receive buffers. The value
260 SKBUFF_RX_COPYBREAK is used as the copying breakpoint: it is chosen to
261 trade-off the memory wasted by passing the full-sized skbuff to the queue
262 layer for all frames vs. the copying cost of copying a frame to a
263 correctly-sized skbuff.
265 For small frames the copying cost is negligible (esp. considering that we
266 are pre-loading the cache with immediately useful header information), so we
267 allocate a new, minimally-sized skbuff. For large frames the copying cost
268 is non-trivial, and the larger copy might flush the cache of useful data, so
269 we pass up the skbuff the packet was received into.
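
In outline, the decision looks like this (a simplified sketch; the actual
code in speedo_rx(), later in this file, also handles DMA syncing and
allocation failure):

	if (pkt_len < rx_copybreak) {
		// copy the frame into a freshly allocated small skb, pass it up
	} else {
		// pass up the original full-sized skb and refill the ring slot
	}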
271 IV. Notes
273 Thanks to Steve Williams of Intel for arranging the non-disclosure agreement
274 that stated that I could disclose the information. But I still resent
275 having to sign an Intel NDA when I'm helping Intel sell their own product!
*/

static int speedo_found1(struct pci_dev *pdev, void __iomem *ioaddr, int fnd_cnt, int acpi_idle_state);
281 /* Offsets to the various registers.
282 All accesses need not be longword aligned. */
283 enum speedo_offsets {
284 SCBStatus = 0, SCBCmd = 2, /* Rx/Command Unit command and status. */
285 SCBIntmask = 3,
286 SCBPointer = 4, /* General purpose pointer. */
287 SCBPort = 8, /* Misc. commands and operands. */
288 SCBflash = 12, SCBeeprom = 14, /* EEPROM and flash memory control. */
289 SCBCtrlMDI = 16, /* MDI interface control. */
	SCBEarlyRx = 20,		/* Early receive byte count. */
};
292 /* Commands that can be put in a command list entry. */
293 enum commands {
294 CmdNOp = 0, CmdIASetup = 0x10000, CmdConfigure = 0x20000,
295 CmdMulticastList = 0x30000, CmdTx = 0x40000, CmdTDR = 0x50000,
296 CmdDump = 0x60000, CmdDiagnose = 0x70000,
297 CmdSuspend = 0x40000000, /* Suspend after completion. */
298 CmdIntr = 0x20000000, /* Interrupt after completion. */
	CmdTxFlex = 0x00080000,		/* Use "Flexible mode" for CmdTx command. */
};
301 /* Clear CmdSuspend (1<<30) avoiding interference with the card access to the
302 status bits. Previous driver versions used separate 16 bit fields for
   commands and statuses. --SAW */
305 #if defined(__alpha__)
306 # define clear_suspend(cmd) clear_bit(30, &(cmd)->cmd_status);
307 #else
308 # if defined(__LITTLE_ENDIAN)
309 # define clear_suspend(cmd) ((__u16 *)&(cmd)->cmd_status)[1] &= ~0x4000
310 # elif defined(__BIG_ENDIAN)
311 # define clear_suspend(cmd) ((__u16 *)&(cmd)->cmd_status)[1] &= ~0x0040
312 # else
313 # error Unsupported byteorder
314 # endif
315 #endif
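/* Why the halfword trick works: cmd_status is kept little-endian for the
   chip, and CmdSuspend is bit 30 of that 32-bit value, i.e. bit 14 of its
   upper halfword.  Read natively, ((__u16 *)&cmd_status)[1] is that upper
   halfword on a little-endian host (mask 0x4000); on a big-endian host the
   same two bytes are seen swapped, moving the bit to 0x0040.  Alpha takes
   the atomic clear_bit() path, presumably because older Alphas lack
   sub-word stores, making a 16-bit read-modify-write unsafe there. */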
317 enum SCBCmdBits {
318 SCBMaskCmdDone=0x8000, SCBMaskRxDone=0x4000, SCBMaskCmdIdle=0x2000,
319 SCBMaskRxSuspend=0x1000, SCBMaskEarlyRx=0x0800, SCBMaskFlowCtl=0x0400,
320 SCBTriggerIntr=0x0200, SCBMaskAll=0x0100,
321 /* The rest are Rx and Tx commands. */
322 CUStart=0x0010, CUResume=0x0020, CUStatsAddr=0x0040, CUShowStats=0x0050,
323 CUCmdBase=0x0060, /* CU Base address (set to zero) . */
324 CUDumpStats=0x0070, /* Dump then reset stats counters. */
325 RxStart=0x0001, RxResume=0x0002, RxAbort=0x0004, RxAddrLoad=0x0006,
	RxResumeNoResources=0x0007,
};
329 enum SCBPort_cmds {
	PortReset=0, PortSelfTest=1, PortPartialReset=2, PortDump=3,
};
333 /* The Speedo3 Rx and Tx frame/buffer descriptors. */
334 struct descriptor { /* A generic descriptor. */
335 volatile s32 cmd_status; /* All command and status fields. */
336 u32 link; /* struct descriptor * */
	unsigned char params[0];
};
340 /* The Speedo3 Rx and Tx buffer descriptors. */
341 struct RxFD { /* Receive frame descriptor. */
342 volatile s32 status;
343 u32 link; /* struct RxFD * */
344 u32 rx_buf_addr; /* void * */
345 u32 count;
346 } RxFD_ALIGNMENT;
348 /* Selected elements of the Tx/RxFD.status word. */
349 enum RxFD_bits {
350 RxComplete=0x8000, RxOK=0x2000,
351 RxErrCRC=0x0800, RxErrAlign=0x0400, RxErrTooBig=0x0200, RxErrSymbol=0x0010,
352 RxEth2Type=0x0020, RxNoMatch=0x0004, RxNoIAMatch=0x0002,
	TxUnderrun=0x1000, StatusComplete=0x8000,
};
356 #define CONFIG_DATA_SIZE 22
357 struct TxFD { /* Transmit frame descriptor set. */
358 s32 status;
359 u32 link; /* void * */
360 u32 tx_desc_addr; /* Always points to the tx_buf_addr element. */
361 s32 count; /* # of TBD (=1), Tx start thresh., etc. */
362 /* This constitutes two "TBD" entries -- we only use one. */
363 #define TX_DESCR_BUF_OFFSET 16
364 u32 tx_buf_addr0; /* void *, frame to be transmitted. */
365 s32 tx_buf_size0; /* Length of Tx frame. */
366 u32 tx_buf_addr1; /* void *, frame to be transmitted. */
367 s32 tx_buf_size1; /* Length of Tx frame. */
368 /* the structure must have space for at least CONFIG_DATA_SIZE starting
	 * from tx_desc_addr field */
};
372 /* Multicast filter setting block. --SAW */
373 struct speedo_mc_block {
374 struct speedo_mc_block *next;
375 unsigned int tx;
376 dma_addr_t frame_dma;
377 unsigned int len;
	struct descriptor frame __attribute__ ((__aligned__(16)));
};
381 /* Elements of the dump_statistics block. This block must be lword aligned. */
382 struct speedo_stats {
383 u32 tx_good_frames;
384 u32 tx_coll16_errs;
385 u32 tx_late_colls;
386 u32 tx_underruns;
387 u32 tx_lost_carrier;
388 u32 tx_deferred;
389 u32 tx_one_colls;
390 u32 tx_multi_colls;
391 u32 tx_total_colls;
392 u32 rx_good_frames;
393 u32 rx_crc_errs;
394 u32 rx_align_errs;
395 u32 rx_resource_errs;
396 u32 rx_overrun_errs;
397 u32 rx_colls_errs;
398 u32 rx_runt_errs;
	u32 done_marker;
};
402 enum Rx_ring_state_bits {
	RrNoMem=1, RrPostponed=2, RrNoResources=4, RrOOMReported=8,
};
406 /* Do not change the position (alignment) of the first few elements!
407 The later elements are grouped for cache locality.
   Unfortunately, all the positions have been shifted since then.
410 A new re-alignment is required. 2000/03/06 SAW */
411 struct speedo_private {
412 void __iomem *regs;
413 struct TxFD *tx_ring; /* Commands (usually CmdTxPacket). */
414 struct RxFD *rx_ringp[RX_RING_SIZE]; /* Rx descriptor, used as ring. */
415 /* The addresses of a Tx/Rx-in-place packets/buffers. */
416 struct sk_buff *tx_skbuff[TX_RING_SIZE];
417 struct sk_buff *rx_skbuff[RX_RING_SIZE];
418 /* Mapped addresses of the rings. */
419 dma_addr_t tx_ring_dma;
420 #define TX_RING_ELEM_DMA(sp, n) ((sp)->tx_ring_dma + (n)*sizeof(struct TxFD))
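	/* Example: TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE) is the bus
	   address of the next TxFD; index TX_RING_SIZE (one past the ring) is
	   the lstats block, which is allocated in the same DMA buffer. */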
421 dma_addr_t rx_ring_dma[RX_RING_SIZE];
422 struct descriptor *last_cmd; /* Last command sent. */
423 unsigned int cur_tx, dirty_tx; /* The ring entries to be free()ed. */
424 spinlock_t lock; /* Group with Tx control cache line. */
425 u32 tx_threshold; /* The value for txdesc.count. */
426 struct RxFD *last_rxf; /* Last filled RX buffer. */
427 dma_addr_t last_rxf_dma;
428 unsigned int cur_rx, dirty_rx; /* The next free ring entry */
429 long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */
430 struct net_device_stats stats;
431 struct speedo_stats *lstats;
432 dma_addr_t lstats_dma;
433 int chip_id;
434 struct pci_dev *pdev;
435 struct timer_list timer; /* Media selection timer. */
436 struct speedo_mc_block *mc_setup_head; /* Multicast setup frame list head. */
437 struct speedo_mc_block *mc_setup_tail; /* Multicast setup frame list tail. */
438 long in_interrupt; /* Word-aligned dev->interrupt */
439 unsigned char acpi_pwr;
440 signed char rx_mode; /* Current PROMISC/ALLMULTI setting. */
441 unsigned int tx_full:1; /* The Tx queue is full. */
442 unsigned int flow_ctrl:1; /* Use 802.3x flow control. */
443 unsigned int rx_bug:1; /* Work around receiver hang errata. */
444 unsigned char default_port:8; /* Last dev->if_port value. */
445 unsigned char rx_ring_state; /* RX ring status flags. */
446 unsigned short phy[2]; /* PHY media interfaces available. */
447 unsigned short partner; /* Link partner caps. */
448 struct mii_if_info mii_if; /* MII API hooks, info */
	u32 msg_enable;			/* debug message level */
};
452 /* The parameters for a CmdConfigure operation.
453 There are so many options that it would be difficult to document each bit.
454 We mostly use the default or recommended settings. */
455 static const char i82557_config_cmd[CONFIG_DATA_SIZE] = {
456 22, 0x08, 0, 0, 0, 0, 0x32, 0x03, 1, /* 1=Use MII 0=Use AUI */
457 0, 0x2E, 0, 0x60, 0,
458 0xf2, 0x48, 0, 0x40, 0xf2, 0x80, /* 0x40=Force full-duplex */
459 0x3f, 0x05, };
460 static const char i82558_config_cmd[CONFIG_DATA_SIZE] = {
461 22, 0x08, 0, 1, 0, 0, 0x22, 0x03, 1, /* 1=Use MII 0=Use AUI */
462 0, 0x2E, 0, 0x60, 0x08, 0x88,
463 0x68, 0, 0x40, 0xf2, 0x84, /* Disable FC */
464 0x31, 0x05, };
466 /* PHY media interface chips. */
467 static const char * const phys[] = {
468 "None", "i82553-A/B", "i82553-C", "i82503",
469 "DP83840", "80c240", "80c24", "i82555",
470 "unknown-8", "unknown-9", "DP83840A", "unknown-11",
471 "unknown-12", "unknown-13", "unknown-14", "unknown-15", };
472 enum phy_chips { NonSuchPhy=0, I82553AB, I82553C, I82503, DP83840, S80C240,
473 S80C24, I82555, DP83840A=10, };
474 static const char is_mii[] = { 0, 1, 1, 0, 1, 1, 0, 1 };
475 #define EE_READ_CMD (6)
477 static int eepro100_init_one(struct pci_dev *pdev,
478 const struct pci_device_id *ent);
480 static int do_eeprom_cmd(void __iomem *ioaddr, int cmd, int cmd_len);
481 static int mdio_read(struct net_device *dev, int phy_id, int location);
482 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
483 static int speedo_open(struct net_device *dev);
484 static void speedo_resume(struct net_device *dev);
485 static void speedo_timer(unsigned long data);
486 static void speedo_init_rx_ring(struct net_device *dev);
487 static void speedo_tx_timeout(struct net_device *dev);
488 static int speedo_start_xmit(struct sk_buff *skb, struct net_device *dev);
489 static void speedo_refill_rx_buffers(struct net_device *dev, int force);
490 static int speedo_rx(struct net_device *dev);
491 static void speedo_tx_buffer_gc(struct net_device *dev);
492 static irqreturn_t speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
493 static int speedo_close(struct net_device *dev);
494 static struct net_device_stats *speedo_get_stats(struct net_device *dev);
495 static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
496 static void set_rx_mode(struct net_device *dev);
497 static void speedo_show_state(struct net_device *dev);
498 static struct ethtool_ops ethtool_ops;
502 #ifdef honor_default_port
503 /* Optional driver feature to allow forcing the transceiver setting.
504 Not recommended. */
505 static int mii_ctrl[8] = { 0x3300, 0x3100, 0x0000, 0x0100,
506 0x2000, 0x2100, 0x0400, 0x3100};
507 #endif
509 /* How to wait for the command unit to accept a command.
510 Typically this takes 0 ticks. */
511 static inline unsigned char wait_for_cmd_done(struct net_device *dev,
					      struct speedo_private *sp)
{
	int wait = 1000;
515 void __iomem *cmd_ioaddr = sp->regs + SCBCmd;
516 unsigned char r;
518 do {
519 udelay(1);
520 r = ioread8(cmd_ioaddr);
521 } while(r && --wait >= 0);
523 if (wait < 0)
524 printk(KERN_ALERT "%s: wait_for_cmd_done timeout!\n", dev->name);
	return r;
}
528 static int __devinit eepro100_init_one (struct pci_dev *pdev,
				       const struct pci_device_id *ent)
{
	void __iomem *ioaddr;
532 int irq, pci_bar;
533 int acpi_idle_state = 0, pm;
534 static int cards_found /* = 0 */;
535 unsigned long pci_base;
537 #ifndef MODULE
538 /* when built-in, we only print version if device is found */
539 static int did_version;
540 if (did_version++ == 0)
541 printk(version);
542 #endif
544 /* save power state before pci_enable_device overwrites it */
545 pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
546 if (pm) {
547 u16 pwr_command;
548 pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pwr_command);
		acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
	}
	if (pci_enable_device(pdev))
		goto err_out_none;	/* no regions requested yet, so do not release them */
555 pci_set_master(pdev);
557 if (!request_region(pci_resource_start(pdev, 1),
558 pci_resource_len(pdev, 1), "eepro100")) {
559 printk (KERN_ERR "eepro100: cannot reserve I/O ports\n");
		goto err_out_none;
	}
562 if (!request_mem_region(pci_resource_start(pdev, 0),
563 pci_resource_len(pdev, 0), "eepro100")) {
564 printk (KERN_ERR "eepro100: cannot reserve MMIO region\n");
		goto err_out_free_pio_region;
	}
568 irq = pdev->irq;
569 pci_bar = use_io ? 1 : 0;
570 pci_base = pci_resource_start(pdev, pci_bar);
571 if (DEBUG & NETIF_MSG_PROBE)
572 printk("Found Intel i82557 PCI Speedo at %#lx, IRQ %d.\n",
573 pci_base, irq);
575 ioaddr = pci_iomap(pdev, pci_bar, 0);
576 if (!ioaddr) {
577 printk (KERN_ERR "eepro100: cannot remap IO\n");
		goto err_out_free_mmio_region;
	}
581 if (speedo_found1(pdev, ioaddr, cards_found, acpi_idle_state) == 0)
582 cards_found++;
583 else
584 goto err_out_iounmap;
586 return 0;
588 err_out_iounmap: ;
589 pci_iounmap(pdev, ioaddr);
590 err_out_free_mmio_region:
591 release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
592 err_out_free_pio_region:
593 release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
594 err_out_none:
	return -ENODEV;
}
598 #ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void poll_speedo (struct net_device *dev)
{
607 /* disable_irq is not very nice, but with the funny lockless design
608 we have no other choice. */
609 disable_irq(dev->irq);
610 speedo_interrupt (dev->irq, dev, NULL);
611 enable_irq(dev->irq);
}
#endif
615 static int __devinit speedo_found1(struct pci_dev *pdev,
				   void __iomem *ioaddr, int card_idx, int acpi_idle_state)
{
	struct net_device *dev;
619 struct speedo_private *sp;
620 const char *product;
621 int i, option;
622 u16 eeprom[0x100];
623 int size;
624 void *tx_ring_space;
625 dma_addr_t tx_ring_dma;
627 size = TX_RING_SIZE * sizeof(struct TxFD) + sizeof(struct speedo_stats);
628 tx_ring_space = pci_alloc_consistent(pdev, size, &tx_ring_dma);
629 if (tx_ring_space == NULL)
630 return -1;
632 dev = alloc_etherdev(sizeof(struct speedo_private));
633 if (dev == NULL) {
634 printk(KERN_ERR "eepro100: Could not allocate ethernet device.\n");
635 pci_free_consistent(pdev, size, tx_ring_space, tx_ring_dma);
		return -1;
	}
639 SET_MODULE_OWNER(dev);
640 SET_NETDEV_DEV(dev, &pdev->dev);
642 if (dev->mem_start > 0)
643 option = dev->mem_start;
644 else if (card_idx >= 0 && options[card_idx] >= 0)
645 option = options[card_idx];
646 else
647 option = 0;
649 rtnl_lock();
650 if (dev_alloc_name(dev, dev->name) < 0)
651 goto err_free_unlock;
653 /* Read the station address EEPROM before doing the reset.
	   Nominally this should even be done before accepting the device, but
655 then we wouldn't have a device name with which to report the error.
	   The size test is for 6 bit vs. 8 bit address serial EEPROMs. */
	{
		void __iomem *iobase;
660 int read_cmd, ee_size;
661 u16 sum;
662 int j;
664 /* Use IO only to avoid postponed writes and satisfy EEPROM timing
665 requirements. */
666 iobase = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
667 if (!iobase)
668 goto err_free_unlock;
669 if ((do_eeprom_cmd(iobase, EE_READ_CMD << 24, 27) & 0xffe0000)
670 == 0xffe0000) {
671 ee_size = 0x100;
672 read_cmd = EE_READ_CMD << 24;
673 } else {
674 ee_size = 0x40;
			read_cmd = EE_READ_CMD << 22;
		}
678 for (j = 0, i = 0, sum = 0; i < ee_size; i++) {
679 u16 value = do_eeprom_cmd(iobase, read_cmd | (i << 16), 27);
680 eeprom[i] = value;
681 sum += value;
682 if (i < 3) {
683 dev->dev_addr[j++] = value;
				dev->dev_addr[j++] = value >> 8;
			}
		}
687 if (sum != 0xBABA)
688 printk(KERN_WARNING "%s: Invalid EEPROM checksum %#4.4x, "
689 "check settings before activating this device!\n",
690 dev->name, sum);
691 /* Don't unregister_netdev(dev); as the EEPro may actually be
692 usable, especially if the MAC address is set later.
693 On the other hand, it may be unusable if MDI data is corrupted. */
		pci_iounmap(pdev, iobase);
	}
698 /* Reset the chip: stop Tx and Rx processes and clear counters.
699 This takes less than 10usec and will easily finish before the next
700 action. */
701 iowrite32(PortReset, ioaddr + SCBPort);
702 ioread32(ioaddr + SCBPort);
703 udelay(10);
705 if (eeprom[3] & 0x0100)
706 product = "OEM i82557/i82558 10/100 Ethernet";
707 else
708 product = pci_name(pdev);
710 printk(KERN_INFO "%s: %s, ", dev->name, product);
712 for (i = 0; i < 5; i++)
713 printk("%2.2X:", dev->dev_addr[i]);
714 printk("%2.2X, ", dev->dev_addr[i]);
715 printk("IRQ %d.\n", pdev->irq);
717 sp = netdev_priv(dev);
719 /* we must initialize this early, for mdio_{read,write} */
720 sp->regs = ioaddr;
722 #if 1 || defined(kernel_bloat)
723 /* OK, this is pure kernel bloat. I don't like it when other drivers
724 waste non-pageable kernel space to emit similar messages, but I need
	   them for bug reports. */
	{
		const char *connectors[] = {" RJ45", " BNC", " AUI", " MII"};
728 /* The self-test results must be paragraph aligned. */
729 volatile s32 *self_test_results;
		int boguscnt = 16000;		/* Timeout for self-test. */
731 if ((eeprom[3] & 0x03) != 0x03)
732 printk(KERN_INFO " Receiver lock-up bug exists -- enabling"
733 " work-around.\n");
734 printk(KERN_INFO " Board assembly %4.4x%2.2x-%3.3d, Physical"
735 " connectors present:",
736 eeprom[8], eeprom[9]>>8, eeprom[9] & 0xff);
737 for (i = 0; i < 4; i++)
738 if (eeprom[5] & (1<<i))
739 printk(connectors[i]);
740 printk("\n"KERN_INFO" Primary interface chip %s PHY #%d.\n",
741 phys[(eeprom[6]>>8)&15], eeprom[6] & 0x1f);
742 if (eeprom[7] & 0x0700)
743 printk(KERN_INFO " Secondary interface chip %s.\n",
744 phys[(eeprom[7]>>8)&7]);
745 if (((eeprom[6]>>8) & 0x3f) == DP83840
746 || ((eeprom[6]>>8) & 0x3f) == DP83840A) {
747 int mdi_reg23 = mdio_read(dev, eeprom[6] & 0x1f, 23) | 0x0422;
748 if (congenb)
749 mdi_reg23 |= 0x0100;
750 printk(KERN_INFO" DP83840 specific setup, setting register 23 to %4.4x.\n",
751 mdi_reg23);
			mdio_write(dev, eeprom[6] & 0x1f, 23, mdi_reg23);
		}
754 if ((option >= 0) && (option & 0x70)) {
755 printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
756 (option & 0x20 ? 100 : 10),
757 (option & 0x10 ? "full" : "half"));
758 mdio_write(dev, eeprom[6] & 0x1f, MII_BMCR,
759 ((option & 0x20) ? 0x2000 : 0) | /* 100mbps? */
				   ((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */
		}
763 /* Perform a system self-test. */
764 self_test_results = (s32*) ((((long) tx_ring_space) + 15) & ~0xf);
765 self_test_results[0] = 0;
766 self_test_results[1] = -1;
767 iowrite32(tx_ring_dma | PortSelfTest, ioaddr + SCBPort);
768 do {
769 udelay(10);
770 } while (self_test_results[1] == -1 && --boguscnt >= 0);
772 if (boguscnt < 0) { /* Test optimized out. */
773 printk(KERN_ERR "Self test failed, status %8.8x:\n"
774 KERN_ERR " Failure to initialize the i82557.\n"
775 KERN_ERR " Verify that the card is a bus-master"
776 " capable slot.\n",
777 self_test_results[1]);
778 } else
779 printk(KERN_INFO " General self-test: %s.\n"
780 KERN_INFO " Serial sub-system self-test: %s.\n"
781 KERN_INFO " Internal registers self-test: %s.\n"
782 KERN_INFO " ROM checksum self-test: %s (%#8.8x).\n",
783 self_test_results[1] & 0x1000 ? "failed" : "passed",
784 self_test_results[1] & 0x0020 ? "failed" : "passed",
785 self_test_results[1] & 0x0008 ? "failed" : "passed",
786 self_test_results[1] & 0x0004 ? "failed" : "passed",
787 self_test_results[0]);
	}
#endif /* kernel_bloat */
791 iowrite32(PortReset, ioaddr + SCBPort);
792 ioread32(ioaddr + SCBPort);
793 udelay(10);
795 /* Return the chip to its original power state. */
796 pci_set_power_state(pdev, acpi_idle_state);
798 pci_set_drvdata (pdev, dev);
799 SET_NETDEV_DEV(dev, &pdev->dev);
801 dev->irq = pdev->irq;
803 sp->pdev = pdev;
804 sp->msg_enable = DEBUG;
805 sp->acpi_pwr = acpi_idle_state;
806 sp->tx_ring = tx_ring_space;
807 sp->tx_ring_dma = tx_ring_dma;
808 sp->lstats = (struct speedo_stats *)(sp->tx_ring + TX_RING_SIZE);
809 sp->lstats_dma = TX_RING_ELEM_DMA(sp, TX_RING_SIZE);
810 init_timer(&sp->timer); /* used in ioctl() */
811 spin_lock_init(&sp->lock);
813 sp->mii_if.full_duplex = option >= 0 && (option & 0x10) ? 1 : 0;
814 if (card_idx >= 0) {
815 if (full_duplex[card_idx] >= 0)
			sp->mii_if.full_duplex = full_duplex[card_idx];
	}
818 sp->default_port = option >= 0 ? (option & 0x0f) : 0;
820 sp->phy[0] = eeprom[6];
821 sp->phy[1] = eeprom[7];
823 sp->mii_if.phy_id = eeprom[6] & 0x1f;
824 sp->mii_if.phy_id_mask = 0x1f;
825 sp->mii_if.reg_num_mask = 0x1f;
826 sp->mii_if.dev = dev;
827 sp->mii_if.mdio_read = mdio_read;
828 sp->mii_if.mdio_write = mdio_write;
830 sp->rx_bug = (eeprom[3] & 0x03) == 3 ? 0 : 1;
831 if (((pdev->device > 0x1030 && (pdev->device < 0x103F)))
832 || (pdev->device == 0x2449) || (pdev->device == 0x2459)
833 || (pdev->device == 0x245D)) {
		sp->chip_id = 1;
	}
837 if (sp->rx_bug)
838 printk(KERN_INFO " Receiver lock-up workaround activated.\n");
840 /* The Speedo-specific entries in the device structure. */
841 dev->open = &speedo_open;
842 dev->hard_start_xmit = &speedo_start_xmit;
843 netif_set_tx_timeout(dev, &speedo_tx_timeout, TX_TIMEOUT);
844 dev->stop = &speedo_close;
845 dev->get_stats = &speedo_get_stats;
846 dev->set_multicast_list = &set_rx_mode;
847 dev->do_ioctl = &speedo_ioctl;
848 SET_ETHTOOL_OPS(dev, &ethtool_ops);
849 #ifdef CONFIG_NET_POLL_CONTROLLER
850 dev->poll_controller = &poll_speedo;
851 #endif
853 if (register_netdevice(dev))
854 goto err_free_unlock;
855 rtnl_unlock();
857 return 0;
859 err_free_unlock:
860 rtnl_unlock();
861 free_netdev(dev);
	return -1;
}
static void do_slow_command(struct net_device *dev, struct speedo_private *sp, int cmd)
{
	void __iomem *cmd_ioaddr = sp->regs + SCBCmd;
	int wait = 0;
	do
		if (ioread8(cmd_ioaddr) == 0) break;
	while(++wait <= 200);
872 if (wait > 100)
873 printk(KERN_ERR "Command %4.4x never accepted (%d polls)!\n",
874 ioread8(cmd_ioaddr), wait);
876 iowrite8(cmd, cmd_ioaddr);
878 for (wait = 0; wait <= 100; wait++)
879 if (ioread8(cmd_ioaddr) == 0) return;
880 for (; wait <= 20000; wait++)
881 if (ioread8(cmd_ioaddr) == 0) return;
882 else udelay(1);
883 printk(KERN_ERR "Command %4.4x was not accepted after %d polls!"
884 " Current status %8.8x.\n",
	       cmd, wait, ioread32(sp->regs + SCBStatus));
}
888 /* Serial EEPROM section.
889 A "bit" grungy, but we work our way through bit-by-bit :->. */
890 /* EEPROM_Ctrl bits. */
891 #define EE_SHIFT_CLK 0x01 /* EEPROM shift clock. */
892 #define EE_CS 0x02 /* EEPROM chip select. */
893 #define EE_DATA_WRITE 0x04 /* EEPROM chip data in. */
894 #define EE_DATA_READ 0x08 /* EEPROM chip data out. */
895 #define EE_ENB (0x4800 | EE_CS)
896 #define EE_WRITE_0 0x4802
897 #define EE_WRITE_1 0x4806
898 #define EE_OFFSET SCBeeprom
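/* A read transaction shifts out a start bit, the EE_READ_CMD opcode and the
   word address, MSB first, and clocks the 16 data bits back in during the
   same loop.  With the command left-justified as in speedo_found1() above,
   cmd_len is the total bit count: (EE_READ_CMD << 24) | (i << 16) with 27
   bits for an 8-bit-address (256-word) part, or EE_READ_CMD << 22 with the
   same count for a 6-bit-address (64-word) part. */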
900 /* The fixes for the code were kindly provided by Dragan Stancevic
901 <visitor@valinux.com> to strictly follow Intel specifications of EEPROM
902 access timing.
903 The publicly available sheet 64486302 (sec. 3.1) specifies 1us access
   interval for serial EEPROM.  However, it looks like there is an
   additional requirement dictating larger udelay's in the code below.
906 2000/05/24 SAW */
static int __devinit do_eeprom_cmd(void __iomem *ioaddr, int cmd, int cmd_len)
{
	unsigned retval = 0;
910 void __iomem *ee_addr = ioaddr + SCBeeprom;
912 iowrite16(EE_ENB, ee_addr); udelay(2);
913 iowrite16(EE_ENB | EE_SHIFT_CLK, ee_addr); udelay(2);
915 /* Shift the command bits out. */
916 do {
917 short dataval = (cmd & (1 << cmd_len)) ? EE_WRITE_1 : EE_WRITE_0;
918 iowrite16(dataval, ee_addr); udelay(2);
919 iowrite16(dataval | EE_SHIFT_CLK, ee_addr); udelay(2);
920 retval = (retval << 1) | ((ioread16(ee_addr) & EE_DATA_READ) ? 1 : 0);
921 } while (--cmd_len >= 0);
922 iowrite16(EE_ENB, ee_addr); udelay(2);
924 /* Terminate the EEPROM access. */
925 iowrite16(EE_ENB & ~EE_CS, ee_addr);
	return retval;
}
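/* SCBCtrlMDI layout, as assumed from the i82557 datasheet: bit 28 = ready,
   bits 26-27 = opcode (10 = read, 01 = write), bits 21-25 = PHY address,
   bits 16-20 = register number, bits 0-15 = data.  Hence the
   0x08000000 | (location << 16) | (phy_id << 21) word built below. */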
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct speedo_private *sp = netdev_priv(dev);
932 void __iomem *ioaddr = sp->regs;
933 int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
934 iowrite32(0x08000000 | (location<<16) | (phy_id<<21), ioaddr + SCBCtrlMDI);
935 do {
936 val = ioread32(ioaddr + SCBCtrlMDI);
937 if (--boguscnt < 0) {
938 printk(KERN_ERR " mdio_read() timed out with val = %8.8x.\n", val);
			break;
		}
941 } while (! (val & 0x10000000));
	return val & 0xffff;
}
static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct speedo_private *sp = netdev_priv(dev);
948 void __iomem *ioaddr = sp->regs;
949 int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
950 iowrite32(0x04000000 | (location<<16) | (phy_id<<21) | value,
951 ioaddr + SCBCtrlMDI);
952 do {
953 val = ioread32(ioaddr + SCBCtrlMDI);
954 if (--boguscnt < 0) {
955 printk(KERN_ERR" mdio_write() timed out with val = %8.8x.\n", val);
			break;
		}
	} while (! (val & 0x10000000));
}
961 static int
speedo_open(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
965 void __iomem *ioaddr = sp->regs;
966 int retval;
968 if (netif_msg_ifup(sp))
969 printk(KERN_DEBUG "%s: speedo_open() irq %d.\n", dev->name, dev->irq);
971 pci_set_power_state(sp->pdev, PCI_D0);
973 /* Set up the Tx queue early.. */
974 sp->cur_tx = 0;
975 sp->dirty_tx = 0;
976 sp->last_cmd = NULL;
977 sp->tx_full = 0;
978 sp->in_interrupt = 0;
980 /* .. we can safely take handler calls during init. */
981 retval = request_irq(dev->irq, &speedo_interrupt, SA_SHIRQ, dev->name, dev);
982 if (retval) {
		return retval;
	}
986 dev->if_port = sp->default_port;
988 #ifdef oh_no_you_dont_unless_you_honour_the_options_passed_in_to_us
989 /* Retrigger negotiation to reset previous errors. */
990 if ((sp->phy[0] & 0x8000) == 0) {
991 int phy_addr = sp->phy[0] & 0x1f ;
992 /* Use 0x3300 for restarting NWay, other values to force xcvr:
993 0x0000 10-HD
994 0x0100 10-FD
995 0x2000 100-HD
		   0x2100 100-FD
		*/
998 #ifdef honor_default_port
999 mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
1000 #else
1001 mdio_write(dev, phy_addr, MII_BMCR, 0x3300);
#endif
	}
1004 #endif
1006 speedo_init_rx_ring(dev);
1008 /* Fire up the hardware. */
1009 iowrite16(SCBMaskAll, ioaddr + SCBCmd);
1010 speedo_resume(dev);
1012 netdevice_start(dev);
1013 netif_start_queue(dev);
1015 /* Setup the chip and configure the multicast list. */
1016 sp->mc_setup_head = NULL;
1017 sp->mc_setup_tail = NULL;
1018 sp->flow_ctrl = sp->partner = 0;
1019 sp->rx_mode = -1; /* Invalid -> always reset the mode. */
1020 set_rx_mode(dev);
1021 if ((sp->phy[0] & 0x8000) == 0)
1022 sp->mii_if.advertising = mdio_read(dev, sp->phy[0] & 0x1f, MII_ADVERTISE);
1024 mii_check_link(&sp->mii_if);
1026 if (netif_msg_ifup(sp)) {
1027 printk(KERN_DEBUG "%s: Done speedo_open(), status %8.8x.\n",
		       dev->name, ioread16(ioaddr + SCBStatus));
	}
1031 /* Set the timer. The timer serves a dual purpose:
1032 1) to monitor the media interface (e.g. link beat) and perhaps switch
1033 to an alternate media type
1034 2) to monitor Rx activity, and restart the Rx process if the receiver
1035 hangs. */
1036 sp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */
1037 sp->timer.data = (unsigned long)dev;
1038 sp->timer.function = &speedo_timer; /* timer handler */
1039 add_timer(&sp->timer);
1041 /* No need to wait for the command unit to accept here. */
1042 if ((sp->phy[0] & 0x8000) == 0)
1043 mdio_read(dev, sp->phy[0] & 0x1f, MII_BMCR);
	return 0;
}
1048 /* Start the chip hardware after a full reset. */
static void speedo_resume(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
1052 void __iomem *ioaddr = sp->regs;
1054 /* Start with a Tx threshold of 256 (0x..20.... 8 byte units). */
1055 sp->tx_threshold = 0x01208000;
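	/* Decoding that constant per the TxFD count-word layout: bits 24-31 =
	   TBD count (0x01), bits 16-23 = Tx threshold (0x20 * 8 = 256 bytes),
	   bit 15 = EOF.  speedo_tx_buffer_gc() later raises the threshold by
	   0x00040000 (32 bytes) after each Tx underrun. */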
1057 /* Set the segment registers to '0'. */
1058 if (wait_for_cmd_done(dev, sp) != 0) {
1059 iowrite32(PortPartialReset, ioaddr + SCBPort);
		udelay(10);
	}
1063 iowrite32(0, ioaddr + SCBPointer);
1064 ioread32(ioaddr + SCBPointer); /* Flush to PCI. */
1065 udelay(10); /* Bogus, but it avoids the bug. */
1067 /* Note: these next two operations can take a while. */
1068 do_slow_command(dev, sp, RxAddrLoad);
1069 do_slow_command(dev, sp, CUCmdBase);
1071 /* Load the statistics block and rx ring addresses. */
1072 iowrite32(sp->lstats_dma, ioaddr + SCBPointer);
1073 ioread32(ioaddr + SCBPointer); /* Flush to PCI */
1075 iowrite8(CUStatsAddr, ioaddr + SCBCmd);
1076 sp->lstats->done_marker = 0;
1077 wait_for_cmd_done(dev, sp);
1079 if (sp->rx_ringp[sp->cur_rx % RX_RING_SIZE] == NULL) {
1080 if (netif_msg_rx_err(sp))
1081 printk(KERN_DEBUG "%s: NULL cur_rx in speedo_resume().\n",
1082 dev->name);
1083 } else {
1084 iowrite32(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
1085 ioaddr + SCBPointer);
		ioread32(ioaddr + SCBPointer);	/* Flush to PCI */
	}
1089 /* Note: RxStart should complete instantly. */
1090 do_slow_command(dev, sp, RxStart);
1091 do_slow_command(dev, sp, CUDumpStats);
1093 /* Fill the first command with our physical address. */
	{
		struct descriptor *ias_cmd;
1097 ias_cmd =
1098 (struct descriptor *)&sp->tx_ring[sp->cur_tx++ % TX_RING_SIZE];
1099 /* Avoid a bug(?!) here by marking the command already completed. */
1100 ias_cmd->cmd_status = cpu_to_le32((CmdSuspend | CmdIASetup) | 0xa000);
1101 ias_cmd->link =
1102 cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
1103 memcpy(ias_cmd->params, dev->dev_addr, 6);
1104 if (sp->last_cmd)
1105 clear_suspend(sp->last_cmd);
		sp->last_cmd = ias_cmd;
	}
1109 /* Start the chip's Tx process and unmask interrupts. */
1110 iowrite32(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
1111 ioaddr + SCBPointer);
1112 /* We are not ACK-ing FCP and ER in the interrupt handler yet so they should
1113 remain masked --Dragan */
	iowrite16(CUStart | SCBMaskEarlyRx | SCBMaskFlowCtl, ioaddr + SCBCmd);
}
/*
 * Sometimes the receiver stops making progress.  This routine knows how to
1119 * get it going again, without losing packets or being otherwise nasty like
1120 * a chip reset would be. Previously the driver had a whole sequence
1121 * of if RxSuspended, if it's no buffers do one thing, if it's no resources,
1122 * do another, etc. But those things don't really matter. Separate logic
1123 * in the ISR provides for allocating buffers--the other half of operation
1124 * is just making sure the receiver is active. speedo_rx_soft_reset does that.
1125 * This problem with the old, more involved algorithm is shown up under
 * ping floods on the order of 60K packets/second on a 100Mbps fdx network.
 */
1128 static void
speedo_rx_soft_reset(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
1132 struct RxFD *rfd;
1133 void __iomem *ioaddr;
1135 ioaddr = sp->regs;
1136 if (wait_for_cmd_done(dev, sp) != 0) {
1137 printk("%s: previous command stalled\n", dev->name);
		return;
	}
	/*
	 * Put the hardware into a known state.
	 */
	iowrite8(RxAbort, ioaddr + SCBCmd);
1145 rfd = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];
1147 rfd->rx_buf_addr = 0xffffffff;
1149 if (wait_for_cmd_done(dev, sp) != 0) {
1150 printk("%s: RxAbort command stalled\n", dev->name);
		return;
	}
1153 iowrite32(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
1154 ioaddr + SCBPointer);
	iowrite8(RxStart, ioaddr + SCBCmd);
}
1159 /* Media monitoring and control. */
static void speedo_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
1163 struct speedo_private *sp = netdev_priv(dev);
1164 void __iomem *ioaddr = sp->regs;
1165 int phy_num = sp->phy[0] & 0x1f;
1167 /* We have MII and lost link beat. */
1168 if ((sp->phy[0] & 0x8000) == 0) {
1169 int partner = mdio_read(dev, phy_num, MII_LPA);
1170 if (partner != sp->partner) {
1171 int flow_ctrl = sp->mii_if.advertising & partner & 0x0400 ? 1 : 0;
1172 if (netif_msg_link(sp)) {
1173 printk(KERN_DEBUG "%s: Link status change.\n", dev->name);
1174 printk(KERN_DEBUG "%s: Old partner %x, new %x, adv %x.\n",
				       dev->name, sp->partner, partner, sp->mii_if.advertising);
			}
1177 sp->partner = partner;
1178 if (flow_ctrl != sp->flow_ctrl) {
1179 sp->flow_ctrl = flow_ctrl;
				sp->rx_mode = -1;	/* Trigger a reload. */
			}
		}
	}
1184 mii_check_link(&sp->mii_if);
1185 if (netif_msg_timer(sp)) {
1186 printk(KERN_DEBUG "%s: Media control tick, status %4.4x.\n",
		       dev->name, ioread16(ioaddr + SCBStatus));
	}
1189 if (sp->rx_mode < 0 ||
1190 (sp->rx_bug && jiffies - sp->last_rx_time > 2*HZ)) {
1191 /* We haven't received a packet in a Long Time. We might have been
1192 bitten by the receiver hang bug. This can be cleared by sending
1193 a set multicast list command. */
1194 if (netif_msg_timer(sp))
1195 printk(KERN_DEBUG "%s: Sending a multicast list set command"
1196 " from a timer routine,"
1197 " m=%d, j=%ld, l=%ld.\n",
1198 dev->name, sp->rx_mode, jiffies, sp->last_rx_time);
		set_rx_mode(dev);
	}
1201 /* We must continue to monitor the media. */
1202 sp->timer.expires = RUN_AT(2*HZ); /* 2.0 sec. */
	add_timer(&sp->timer);
}
static void speedo_show_state(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
1209 int i;
1211 if (netif_msg_pktdata(sp)) {
1212 printk(KERN_DEBUG "%s: Tx ring dump, Tx queue %u / %u:\n",
1213 dev->name, sp->cur_tx, sp->dirty_tx);
1214 for (i = 0; i < TX_RING_SIZE; i++)
1215 printk(KERN_DEBUG "%s: %c%c%2d %8.8x.\n", dev->name,
1216 i == sp->dirty_tx % TX_RING_SIZE ? '*' : ' ',
1217 i == sp->cur_tx % TX_RING_SIZE ? '=' : ' ',
1218 i, sp->tx_ring[i].status);
1220 printk(KERN_DEBUG "%s: Printing Rx ring"
1221 " (next to receive into %u, dirty index %u).\n",
1222 dev->name, sp->cur_rx, sp->dirty_rx);
1223 for (i = 0; i < RX_RING_SIZE; i++)
1224 printk(KERN_DEBUG "%s: %c%c%c%2d %8.8x.\n", dev->name,
1225 sp->rx_ringp[i] == sp->last_rxf ? 'l' : ' ',
1226 i == sp->dirty_rx % RX_RING_SIZE ? '*' : ' ',
1227 i == sp->cur_rx % RX_RING_SIZE ? '=' : ' ',
1228 i, (sp->rx_ringp[i] != NULL) ?
			       (unsigned)sp->rx_ringp[i]->status : 0);
	}
1232 #if 0
	{
		void __iomem *ioaddr = sp->regs;
1235 int phy_num = sp->phy[0] & 0x1f;
1236 for (i = 0; i < 16; i++) {
1237 /* FIXME: what does it mean? --SAW */
1238 if (i == 6) i = 21;
1239 printk(KERN_DEBUG "%s: PHY index %d register %d is %4.4x.\n",
			       dev->name, phy_num, i, mdio_read(dev, phy_num, i));
		}
	}
#endif
}
1247 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1248 static void
speedo_init_rx_ring(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
1252 struct RxFD *rxf, *last_rxf = NULL;
1253 dma_addr_t last_rxf_dma = 0 /* to shut up the compiler */;
1254 int i;
1256 sp->cur_rx = 0;
1258 for (i = 0; i < RX_RING_SIZE; i++) {
1259 struct sk_buff *skb;
1260 skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
1261 if (skb)
1262 rx_align(skb); /* Align IP on 16 byte boundary */
1263 sp->rx_skbuff[i] = skb;
1264 if (skb == NULL)
1265 break; /* OK. Just initially short of Rx bufs. */
1266 skb->dev = dev; /* Mark as being used by this device. */
1267 rxf = (struct RxFD *)skb->data;
1268 sp->rx_ringp[i] = rxf;
1269 sp->rx_ring_dma[i] =
1270 pci_map_single(sp->pdev, rxf,
1271 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_BIDIRECTIONAL);
1272 skb_reserve(skb, sizeof(struct RxFD));
1273 if (last_rxf) {
1274 last_rxf->link = cpu_to_le32(sp->rx_ring_dma[i]);
1275 pci_dma_sync_single_for_device(sp->pdev, last_rxf_dma,
					sizeof(struct RxFD), PCI_DMA_TODEVICE);
		}
1278 last_rxf = rxf;
1279 last_rxf_dma = sp->rx_ring_dma[i];
1280 rxf->status = cpu_to_le32(0x00000001); /* '1' is flag value only. */
1281 rxf->link = 0; /* None yet. */
1282 /* This field unused by i82557. */
1283 rxf->rx_buf_addr = 0xffffffff;
1284 rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
1285 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[i],
1286 sizeof(struct RxFD), PCI_DMA_TODEVICE);
	}
	sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1289 /* Mark the last entry as end-of-list. */
1290 last_rxf->status = cpu_to_le32(0xC0000002); /* '2' is flag value only. */
1291 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[RX_RING_SIZE-1],
1292 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1293 sp->last_rxf = last_rxf;
	sp->last_rxf_dma = last_rxf_dma;
}
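/* Note the dirty_rx arithmetic above: (unsigned int)(i - RX_RING_SIZE)
   wraps negative when the allocation loop fell short of RX_RING_SIZE skbs,
   so cur_rx - dirty_rx records exactly how many ring slots still lack a
   buffer and the refill path can top the ring up later. */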
static void speedo_purge_tx(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
1300 int entry;
1302 while ((int)(sp->cur_tx - sp->dirty_tx) > 0) {
1303 entry = sp->dirty_tx % TX_RING_SIZE;
1304 if (sp->tx_skbuff[entry]) {
1305 sp->stats.tx_errors++;
1306 pci_unmap_single(sp->pdev,
1307 le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
1308 sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
1309 dev_kfree_skb_irq(sp->tx_skbuff[entry]);
			sp->tx_skbuff[entry] = NULL;
		}
		sp->dirty_tx++;
	}
1314 while (sp->mc_setup_head != NULL) {
1315 struct speedo_mc_block *t;
1316 if (netif_msg_tx_err(sp))
1317 printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
1318 pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
1319 sp->mc_setup_head->len, PCI_DMA_TODEVICE);
1320 t = sp->mc_setup_head->next;
1321 kfree(sp->mc_setup_head);
1322 sp->mc_setup_head = t;
1324 sp->mc_setup_tail = NULL;
1325 sp->tx_full = 0;
	netif_wake_queue(dev);
}
static void reset_mii(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
1333 /* Reset the MII transceiver, suggested by Fred Young @ scalable.com. */
1334 if ((sp->phy[0] & 0x8000) == 0) {
1335 int phy_addr = sp->phy[0] & 0x1f;
1336 int advertising = mdio_read(dev, phy_addr, MII_ADVERTISE);
1337 int mii_bmcr = mdio_read(dev, phy_addr, MII_BMCR);
1338 mdio_write(dev, phy_addr, MII_BMCR, 0x0400);
1339 mdio_write(dev, phy_addr, MII_BMSR, 0x0000);
1340 mdio_write(dev, phy_addr, MII_ADVERTISE, 0x0000);
1341 mdio_write(dev, phy_addr, MII_BMCR, 0x8000);
1342 #ifdef honor_default_port
1343 mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
1344 #else
1345 mdio_read(dev, phy_addr, MII_BMCR);
1346 mdio_write(dev, phy_addr, MII_BMCR, mii_bmcr);
1347 mdio_write(dev, phy_addr, MII_ADVERTISE, advertising);
#endif
	}
}
static void speedo_tx_timeout(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
1355 void __iomem *ioaddr = sp->regs;
1356 int status = ioread16(ioaddr + SCBStatus);
1357 unsigned long flags;
1359 if (netif_msg_tx_err(sp)) {
1360 printk(KERN_WARNING "%s: Transmit timed out: status %4.4x "
1361 " %4.4x at %d/%d command %8.8x.\n",
1362 dev->name, status, ioread16(ioaddr + SCBCmd),
1363 sp->dirty_tx, sp->cur_tx,
		       sp->tx_ring[sp->dirty_tx % TX_RING_SIZE].status);
	}
1367 speedo_show_state(dev);
1368 #if 0
1369 if ((status & 0x00C0) != 0x0080
1370 && (status & 0x003C) == 0x0010) {
1371 /* Only the command unit has stopped. */
1372 printk(KERN_WARNING "%s: Trying to restart the transmitter...\n",
1373 dev->name);
		iowrite32(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
			  ioaddr + SCBPointer);
1376 iowrite16(CUStart, ioaddr + SCBCmd);
1377 reset_mii(dev);
1378 } else {
#else
	{
#endif
1382 del_timer_sync(&sp->timer);
1383 /* Reset the Tx and Rx units. */
1384 iowrite32(PortReset, ioaddr + SCBPort);
	/* We may get spurious interrupts here.  But I don't think they can
	   do much harm.  1999/12/09 SAW */
1387 udelay(10);
1388 /* Disable interrupts. */
1389 iowrite16(SCBMaskAll, ioaddr + SCBCmd);
1390 synchronize_irq(dev->irq);
1391 speedo_tx_buffer_gc(dev);
1392 /* Free as much as possible.
1393 It helps to recover from a hang because of out-of-memory.
1394 It also simplifies speedo_resume() in case TX ring is full or
1395 close-to-be full. */
1396 speedo_purge_tx(dev);
1397 speedo_refill_rx_buffers(dev, 1);
1398 spin_lock_irqsave(&sp->lock, flags);
1399 speedo_resume(dev);
1400 sp->rx_mode = -1;
1401 dev->trans_start = jiffies;
1402 spin_unlock_irqrestore(&sp->lock, flags);
1403 set_rx_mode(dev); /* it takes the spinlock itself --SAW */
	/* Reset the MII transceiver.  Do it before starting the timer to
	   serialize mdio_xxx operations.  Yes, it's paranoia :-)  2000/05/09 SAW */
1406 reset_mii(dev);
1407 sp->timer.expires = RUN_AT(2*HZ);
		add_timer(&sp->timer);
	}
	return;
}
1413 static int
speedo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
1417 void __iomem *ioaddr = sp->regs;
1418 int entry;
1420 /* Prevent interrupts from changing the Tx ring from underneath us. */
1421 unsigned long flags;
1423 spin_lock_irqsave(&sp->lock, flags);
	/* Check if there is enough space. */
1426 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
1427 printk(KERN_ERR "%s: incorrect tbusy state, fixed.\n", dev->name);
1428 netif_stop_queue(dev);
1429 sp->tx_full = 1;
1430 spin_unlock_irqrestore(&sp->lock, flags);
		return 1;
	}
1434 /* Calculate the Tx descriptor entry. */
1435 entry = sp->cur_tx++ % TX_RING_SIZE;
1437 sp->tx_skbuff[entry] = skb;
1438 sp->tx_ring[entry].status =
1439 cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
1440 if (!(entry & ((TX_RING_SIZE>>2)-1)))
1441 sp->tx_ring[entry].status |= cpu_to_le32(CmdIntr);
1442 sp->tx_ring[entry].link =
1443 cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
1444 sp->tx_ring[entry].tx_desc_addr =
1445 cpu_to_le32(TX_RING_ELEM_DMA(sp, entry) + TX_DESCR_BUF_OFFSET);
1446 /* The data region is always in one buffer descriptor. */
1447 sp->tx_ring[entry].count = cpu_to_le32(sp->tx_threshold);
1448 sp->tx_ring[entry].tx_buf_addr0 =
1449 cpu_to_le32(pci_map_single(sp->pdev, skb->data,
1450 skb->len, PCI_DMA_TODEVICE));
1451 sp->tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb->len);
1453 /* workaround for hardware bug on 10 mbit half duplex */
1455 if ((sp->partner == 0) && (sp->chip_id == 1)) {
1456 wait_for_cmd_done(dev, sp);
1457 iowrite8(0 , ioaddr + SCBCmd);
		udelay(1);
	}
1461 /* Trigger the command unit resume. */
1462 wait_for_cmd_done(dev, sp);
1463 clear_suspend(sp->last_cmd);
1464 /* We want the time window between clearing suspend flag on the previous
1465 command and resuming CU to be as small as possible.
1466 Interrupts in between are very undesired. --SAW */
1467 iowrite8(CUResume, ioaddr + SCBCmd);
1468 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
	/* Leave room for set_rx_mode().  If no more space remains than is
	   reserved for the multicast filter, mark the ring as full. */
1472 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
1473 netif_stop_queue(dev);
		sp->tx_full = 1;
	}
1477 spin_unlock_irqrestore(&sp->lock, flags);
1479 dev->trans_start = jiffies;
	return 0;
}
static void speedo_tx_buffer_gc(struct net_device *dev)
{
	unsigned int dirty_tx;
	struct speedo_private *sp = netdev_priv(dev);
1489 dirty_tx = sp->dirty_tx;
1490 while ((int)(sp->cur_tx - dirty_tx) > 0) {
1491 int entry = dirty_tx % TX_RING_SIZE;
1492 int status = le32_to_cpu(sp->tx_ring[entry].status);
1494 if (netif_msg_tx_done(sp))
1495 printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n",
1496 entry, status);
1497 if ((status & StatusComplete) == 0)
1498 break; /* It still hasn't been processed. */
1499 if (status & TxUnderrun)
1500 if (sp->tx_threshold < 0x01e08000) {
1501 if (netif_msg_tx_err(sp))
1502 printk(KERN_DEBUG "%s: TX underrun, threshold adjusted.\n",
1503 dev->name);
				sp->tx_threshold += 0x00040000;
			}
1506 /* Free the original skb. */
1507 if (sp->tx_skbuff[entry]) {
1508 sp->stats.tx_packets++; /* Count only user packets. */
1509 sp->stats.tx_bytes += sp->tx_skbuff[entry]->len;
1510 pci_unmap_single(sp->pdev,
1511 le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
1512 sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
1513 dev_kfree_skb_irq(sp->tx_skbuff[entry]);
			sp->tx_skbuff[entry] = NULL;
		}
		dirty_tx++;
	}
1519 if (netif_msg_tx_err(sp) && (int)(sp->cur_tx - dirty_tx) > TX_RING_SIZE) {
1520 printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d,"
1521 " full=%d.\n",
1522 dirty_tx, sp->cur_tx, sp->tx_full);
		dirty_tx += TX_RING_SIZE;
	}
1526 while (sp->mc_setup_head != NULL
1527 && (int)(dirty_tx - sp->mc_setup_head->tx - 1) > 0) {
1528 struct speedo_mc_block *t;
1529 if (netif_msg_tx_err(sp))
1530 printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
1531 pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
1532 sp->mc_setup_head->len, PCI_DMA_TODEVICE);
1533 t = sp->mc_setup_head->next;
1534 kfree(sp->mc_setup_head);
		sp->mc_setup_head = t;
	}
1537 if (sp->mc_setup_head == NULL)
1538 sp->mc_setup_tail = NULL;
	sp->dirty_tx = dirty_tx;
}
1543 /* The interrupt handler does all of the Rx thread work and cleans up
1544 after the Tx thread. */
static irqreturn_t speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct speedo_private *sp;
1549 void __iomem *ioaddr;
1550 long boguscnt = max_interrupt_work;
1551 unsigned short status;
1552 unsigned int handled = 0;
1554 sp = netdev_priv(dev);
1555 ioaddr = sp->regs;
1557 #ifndef final_version
1558 /* A lock to prevent simultaneous entry on SMP machines. */
1559 if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
1560 printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
1561 dev->name);
1562 sp->in_interrupt = 0; /* Avoid halting machine. */
		return IRQ_NONE;
	}
1565 #endif
1567 do {
1568 status = ioread16(ioaddr + SCBStatus);
1569 /* Acknowledge all of the current interrupt sources ASAP. */
1570 /* Will change from 0xfc00 to 0xff00 when we start handling
1571 FCP and ER interrupts --Dragan */
1572 iowrite16(status & 0xfc00, ioaddr + SCBStatus);
1574 if (netif_msg_intr(sp))
1575 printk(KERN_DEBUG "%s: interrupt status=%#4.4x.\n",
1576 dev->name, status);
1578 if ((status & 0xfc00) == 0)
1579 break;
1580 handled = 1;
1583 if ((status & 0x5000) || /* Packet received, or Rx error. */
1584 (sp->rx_ring_state&(RrNoMem|RrPostponed)) == RrPostponed)
1585 /* Need to gather the postponed packet. */
1586 speedo_rx(dev);
1588 /* Always check if all rx buffers are allocated. --SAW */
1589 speedo_refill_rx_buffers(dev, 0);
1591 spin_lock(&sp->lock);
1593 * The chip may have suspended reception for various reasons.
1594 * Check for that, and re-prime it should this be the case.
1596 switch ((status >> 2) & 0xf) {
1597 case 0: /* Idle */
1598 break;
1599 case 1: /* Suspended */
1600 case 2: /* No resources (RxFDs) */
1601 case 9: /* Suspended with no more RBDs */
1602 case 10: /* No resources due to no RBDs */
1603 case 12: /* Ready with no RBDs */
1604 speedo_rx_soft_reset(dev);
1605 break;
1606 case 3: case 5: case 6: case 7: case 8:
1607 case 11: case 13: case 14: case 15:
1608 /* these are all reserved values */
1609 break;
1613 /* User interrupt, Command/Tx unit interrupt or CU not active. */
1614 if (status & 0xA400) {
1615 speedo_tx_buffer_gc(dev);
1616 if (sp->tx_full
1617 && (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL) {
1618 /* The ring is no longer full. */
1619 sp->tx_full = 0;
1620 netif_wake_queue(dev); /* Attention: under a spinlock. --SAW */
1624 spin_unlock(&sp->lock);
1626 if (--boguscnt < 0) {
1627 printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
1628 dev->name, status);
1629 /* Clear all interrupt sources. */
1630 /* Will change from 0xfc00 to 0xff00 when we start handling
1631 FCP and ER interrupts --Dragan */
1632 iowrite16(0xfc00, ioaddr + SCBStatus);
1633 break;
1635 } while (1);
1637 if (netif_msg_intr(sp))
1638 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1639 dev->name, ioread16(ioaddr + SCBStatus));
1641 clear_bit(0, (void*)&sp->in_interrupt);
1642 return IRQ_RETVAL(handled);
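
/* Each receive buffer doubles as its own descriptor: the RxFD lives in the
   first sizeof(struct RxFD) bytes of the skb's data area and the packet
   lands immediately after it.  skb_reserve() below hides the descriptor
   from the networking stack, and the whole region is mapped for DMA with
   a single pci_map_single() call. */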
static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry)
{
	struct speedo_private *sp = netdev_priv(dev);
	struct RxFD *rxf;
	struct sk_buff *skb;
	/* Get a fresh skbuff to replace the consumed one. */
	skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
	if (skb)
		rx_align(skb);		/* Align IP on 16 byte boundary */
	sp->rx_skbuff[entry] = skb;
	if (skb == NULL) {
		sp->rx_ringp[entry] = NULL;
		return NULL;
	}
	rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->data;
	sp->rx_ring_dma[entry] =
		pci_map_single(sp->pdev, rxf,
			       PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
	skb->dev = dev;
	skb_reserve(skb, sizeof(struct RxFD));
	rxf->rx_buf_addr = 0xffffffff;
	pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
				       sizeof(struct RxFD), PCI_DMA_TODEVICE);
	return rxf;
}
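
/* Appending to the RxFD list: the new tail is written with the two high
   status bits (the 0xC0000000 mask, the EL end-of-list and S suspend bits)
   set, then those bits are cleared in the old tail, handing it over to the
   receive unit.  The ordering matters: the chip must never chase a link
   into a half-initialized descriptor. */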
static inline void speedo_rx_link(struct net_device *dev, int entry,
				  struct RxFD *rxf, dma_addr_t rxf_dma)
{
	struct speedo_private *sp = netdev_priv(dev);
	rxf->status = cpu_to_le32(0xC0000001);	/* '1' for driver use only. */
	rxf->link = 0;			/* None yet. */
	rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
	sp->last_rxf->link = cpu_to_le32(rxf_dma);
	sp->last_rxf->status &= cpu_to_le32(~0xC0000000);
	pci_dma_sync_single_for_device(sp->pdev, sp->last_rxf_dma,
				       sizeof(struct RxFD), PCI_DMA_TODEVICE);
	sp->last_rxf = rxf;
	sp->last_rxf_dma = rxf_dma;
}
static int speedo_refill_rx_buf(struct net_device *dev, int force)
{
	struct speedo_private *sp = netdev_priv(dev);
	int entry;
	struct RxFD *rxf;

	entry = sp->dirty_rx % RX_RING_SIZE;
	if (sp->rx_skbuff[entry] == NULL) {
		rxf = speedo_rx_alloc(dev, entry);
		if (rxf == NULL) {
			unsigned int forw;
			int forw_entry;
			if (netif_msg_rx_err(sp) || !(sp->rx_ring_state & RrOOMReported)) {
				printk(KERN_WARNING "%s: can't fill rx buffer (force %d)!\n",
				       dev->name, force);
				sp->rx_ring_state |= RrOOMReported;
			}
			speedo_show_state(dev);
			if (!force)
				return -1;	/* Better luck next time! */
			/* Borrow an skb from one of next entries. */
			for (forw = sp->dirty_rx + 1; forw != sp->cur_rx; forw++)
				if (sp->rx_skbuff[forw % RX_RING_SIZE] != NULL)
					break;
			if (forw == sp->cur_rx)
				return -1;
			forw_entry = forw % RX_RING_SIZE;
			sp->rx_skbuff[entry] = sp->rx_skbuff[forw_entry];
			sp->rx_skbuff[forw_entry] = NULL;
			rxf = sp->rx_ringp[forw_entry];
			sp->rx_ringp[forw_entry] = NULL;
			sp->rx_ringp[entry] = rxf;
		}
	} else {
		rxf = sp->rx_ringp[entry];
	}
	speedo_rx_link(dev, entry, rxf, sp->rx_ring_dma[entry]);
	sp->dirty_rx++;
	sp->rx_ring_state &= ~(RrNoMem|RrOOMReported); /* Mark the progress. */
	return 0;
}
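
/* In forced mode, speedo_refill_rx_buf() above may cannibalize an skb from
   a later ring entry when allocation fails, so the ring can develop holes
   that are only repaired once memory pressure eases. */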
static void speedo_refill_rx_buffers(struct net_device *dev, int force)
{
	struct speedo_private *sp = netdev_priv(dev);

	/* Refill the RX ring. */
	while ((int)(sp->cur_rx - sp->dirty_rx) > 0 &&
	       speedo_refill_rx_buf(dev, force) != -1);
}
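
/* Receive loop.  Frames shorter than rx_copybreak are copied into a freshly
   allocated skb so the original full-sized buffer stays in the ring; longer
   frames are passed up directly and their ring slot is refilled afterwards. */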
static int
speedo_rx(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	int entry = sp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;
	int alloc_ok = 1;
	int npkts = 0;

	if (netif_msg_intr(sp))
		printk(KERN_DEBUG " In speedo_rx().\n");
	/* If we own the next entry, it's a new packet. Send it up. */
	while (sp->rx_ringp[entry] != NULL) {
		int status;
		int pkt_len;

		pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
					    sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
		status = le32_to_cpu(sp->rx_ringp[entry]->status);
		pkt_len = le32_to_cpu(sp->rx_ringp[entry]->count) & 0x3fff;

		if (!(status & RxComplete))
			break;

		if (--rx_work_limit < 0)
			break;

		/* Check for a rare out-of-memory case: the current buffer is
		   the last buffer allocated in the RX ring. --SAW */
		if (sp->last_rxf == sp->rx_ringp[entry]) {
			/* Postpone the packet.  It'll be reaped at an interrupt when this
			   packet is no longer the last packet in the ring. */
			if (netif_msg_rx_err(sp))
				printk(KERN_DEBUG "%s: RX packet postponed!\n",
				       dev->name);
			sp->rx_ring_state |= RrPostponed;
			break;
		}

		if (netif_msg_rx_status(sp))
			printk(KERN_DEBUG " speedo_rx() status %8.8x len %d.\n", status,
			       pkt_len);
		if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) {
			if (status & RxErrTooBig)
				printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, "
				       "status %8.8x!\n", dev->name, status);
			else if (!(status & RxOK)) {
				/* There was a fatal error.  This *should* be impossible. */
				sp->stats.rx_errors++;
				printk(KERN_ERR "%s: Anomalous event in speedo_rx(), "
				       "status %8.8x.\n",
				       dev->name, status);
			}
		} else {
			struct sk_buff *skb;

			/* Check if the packet is long enough to just accept without
			   copying to a properly sized skbuff. */
			if (pkt_len < rx_copybreak
			    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
				/* 'skb_put()' points to the start of sk_buff data area. */
				pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
							    sizeof(struct RxFD) + pkt_len,
							    PCI_DMA_FROMDEVICE);

#if 1 || USE_IP_CSUM
				/* Packet is in one chunk -- we can copy + cksum. */
				eth_copy_and_sum(skb, sp->rx_skbuff[entry]->data, pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->data,
				       pkt_len);
#endif
				pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
							       sizeof(struct RxFD) + pkt_len,
							       PCI_DMA_FROMDEVICE);
				npkts++;
			} else {
				/* Pass up the already-filled skbuff. */
				skb = sp->rx_skbuff[entry];
				if (skb == NULL) {
					printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
					       dev->name);
					break;
				}
				sp->rx_skbuff[entry] = NULL;
				skb_put(skb, pkt_len);
				npkts++;
				sp->rx_ringp[entry] = NULL;
				pci_unmap_single(sp->pdev, sp->rx_ring_dma[entry],
						 PKT_BUF_SZ + sizeof(struct RxFD),
						 PCI_DMA_FROMDEVICE);
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			sp->stats.rx_packets++;
			sp->stats.rx_bytes += pkt_len;
		}
		entry = (++sp->cur_rx) % RX_RING_SIZE;
		sp->rx_ring_state &= ~RrPostponed;
		/* Refill the recently taken buffers.
		   Do it one-by-one to handle traffic bursts better. */
		if (alloc_ok && speedo_refill_rx_buf(dev, 0) == -1)
			alloc_ok = 0;
	}

	/* Try hard to refill the recently taken buffers. */
	speedo_refill_rx_buffers(dev, 1);

	if (npkts)
		sp->last_rx_time = jiffies;

	return 0;
}
static int
speedo_close(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->regs;
	int i;

	netdevice_stop(dev);
	netif_stop_queue(dev);

	if (netif_msg_ifdown(sp))
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
		       dev->name, ioread16(ioaddr + SCBStatus));

	/* Shut off the media monitoring timer. */
	del_timer_sync(&sp->timer);

	iowrite16(SCBMaskAll, ioaddr + SCBCmd);

	/* Shutting down the chip nicely fails to disable flow control. So.. */
	iowrite32(PortPartialReset, ioaddr + SCBPort);
	ioread32(ioaddr + SCBPort); /* flush posted write */
	/*
	 * The chip requires a 10 microsecond quiet period.  Wait here!
	 */
	udelay(10);

	free_irq(dev->irq, dev);
	speedo_show_state(dev);

	/* Free all the skbuffs in the Rx and Tx queues. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = sp->rx_skbuff[i];
		sp->rx_skbuff[i] = NULL;
		/* Clear the Rx descriptors. */
		if (skb) {
			pci_unmap_single(sp->pdev,
					 sp->rx_ring_dma[i],
					 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
		}
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = sp->tx_skbuff[i];
		sp->tx_skbuff[i] = NULL;
		/* Clear the Tx descriptors. */
		if (skb) {
			pci_unmap_single(sp->pdev,
					 le32_to_cpu(sp->tx_ring[i].tx_buf_addr0),
					 skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
		}
	}

	/* Free multicast setting blocks. */
	for (i = 0; sp->mc_setup_head != NULL; i++) {
		struct speedo_mc_block *t;
		t = sp->mc_setup_head->next;
		kfree(sp->mc_setup_head);
		sp->mc_setup_head = t;
	}
	sp->mc_setup_tail = NULL;
	if (netif_msg_ifdown(sp))
		printk(KERN_DEBUG "%s: %d multicast blocks dropped.\n", dev->name, i);

	pci_set_power_state(sp->pdev, PCI_D2);

	return 0;
}
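
/* The PCI_D2 transition above is a power-saving measure while the interface
   is down; speedo_open() is presumably responsible for bringing the device
   back to D0 before reuse. */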
/* The Speedo-3 has an especially awkward and unusable method of getting
   statistics out of the chip.  It takes an unpredictable length of time
   for the dump-stats command to complete.  To avoid a busy-wait loop we
   update the stats with the previous dump results, and then trigger a
   new dump.

   Oh, and incoming frames are dropped while executing dump-stats!
*/
static struct net_device_stats *
speedo_get_stats(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->regs;

	/* Update only if the previous dump finished. */
	if (sp->lstats->done_marker == le32_to_cpu(0xA007)) {
		sp->stats.tx_aborted_errors += le32_to_cpu(sp->lstats->tx_coll16_errs);
		sp->stats.tx_window_errors += le32_to_cpu(sp->lstats->tx_late_colls);
		sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_underruns);
		sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_lost_carrier);
		/*sp->stats.tx_deferred += le32_to_cpu(sp->lstats->tx_deferred);*/
		sp->stats.collisions += le32_to_cpu(sp->lstats->tx_total_colls);
		sp->stats.rx_crc_errors += le32_to_cpu(sp->lstats->rx_crc_errs);
		sp->stats.rx_frame_errors += le32_to_cpu(sp->lstats->rx_align_errs);
		sp->stats.rx_over_errors += le32_to_cpu(sp->lstats->rx_resource_errs);
		sp->stats.rx_fifo_errors += le32_to_cpu(sp->lstats->rx_overrun_errs);
		sp->stats.rx_length_errors += le32_to_cpu(sp->lstats->rx_runt_errs);
		sp->lstats->done_marker = 0x0000;
		if (netif_running(dev)) {
			unsigned long flags;
			/* Take a spinlock to make wait_for_cmd_done and sending the
			   command atomic.  --SAW */
			spin_lock_irqsave(&sp->lock, flags);
			wait_for_cmd_done(dev, sp);
			iowrite8(CUDumpStats, ioaddr + SCBCmd);
			spin_unlock_irqrestore(&sp->lock, flags);
		}
	}
	return &sp->stats;
}
static void speedo_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct speedo_private *sp = netdev_priv(dev);
	strncpy(info->driver, "eepro100", sizeof(info->driver)-1);
	strncpy(info->version, version, sizeof(info->version)-1);
	if (sp->pdev)
		strcpy(info->bus_info, pci_name(sp->pdev));
}
static int speedo_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct speedo_private *sp = netdev_priv(dev);
	spin_lock_irq(&sp->lock);
	mii_ethtool_gset(&sp->mii_if, ecmd);
	spin_unlock_irq(&sp->lock);
	return 0;
}
static int speedo_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct speedo_private *sp = netdev_priv(dev);
	int res;
	spin_lock_irq(&sp->lock);
	res = mii_ethtool_sset(&sp->mii_if, ecmd);
	spin_unlock_irq(&sp->lock);
	return res;
}
static int speedo_nway_reset(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	return mii_nway_restart(&sp->mii_if);
}
static u32 speedo_get_link(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	return mii_link_ok(&sp->mii_if);
}
static u32 speedo_get_msglevel(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	return sp->msg_enable;
}
static void speedo_set_msglevel(struct net_device *dev, u32 v)
{
	struct speedo_private *sp = netdev_priv(dev);
	sp->msg_enable = v;
}
static struct ethtool_ops ethtool_ops = {
	.get_drvinfo	= speedo_get_drvinfo,
	.get_settings	= speedo_get_settings,
	.set_settings	= speedo_set_settings,
	.nway_reset	= speedo_nway_reset,
	.get_link	= speedo_get_link,
	.get_msglevel	= speedo_get_msglevel,
	.set_msglevel	= speedo_set_msglevel,
};
static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct speedo_private *sp = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(rq);
	int phy = sp->phy[0] & 0x1f;
	int saved_acpi;
	int t;

	switch (cmd) {
	case SIOCGMIIPHY:	/* Get address of MII PHY in use. */
		data->phy_id = phy;

	case SIOCGMIIREG:	/* Read MII PHY register. */
		/* FIXME: these operations need to be serialized with MDIO
		   access from the timeout handler.
		   They are currently serialized only with MDIO access from the
		   timer routine.  2000/05/09 SAW */
		saved_acpi = pci_set_power_state(sp->pdev, PCI_D0);
		t = del_timer_sync(&sp->timer);
		data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
		if (t)
			add_timer(&sp->timer); /* may be set to the past  --SAW */
		pci_set_power_state(sp->pdev, saved_acpi);
		return 0;

	case SIOCSMIIREG:	/* Write MII PHY register. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		saved_acpi = pci_set_power_state(sp->pdev, PCI_D0);
		t = del_timer_sync(&sp->timer);
		mdio_write(dev, data->phy_id, data->reg_num, data->val_in);
		if (t)
			add_timer(&sp->timer); /* may be set to the past  --SAW */
		pci_set_power_state(sp->pdev, saved_acpi);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
/* Set or clear the multicast filter for this adaptor.
   This is very ugly with Intel chips -- we usually have to execute an
   entire configuration command, plus process a multicast command.
   This is complicated.  We must put a large configuration command and
   an arbitrarily-sized multicast command in the transmit list.
   To minimize the disruption -- the previous command might have already
   loaded the link -- we convert the current command block, normally a Tx
   command, into a no-op and link it to the new command.
*/
static void set_rx_mode(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->regs;
	struct descriptor *last_cmd;
	char new_rx_mode;
	unsigned long flags;
	int entry, i;

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		new_rx_mode = 3;
	} else if ((dev->flags & IFF_ALLMULTI) ||
		   dev->mc_count > multicast_filter_limit) {
		new_rx_mode = 1;
	} else
		new_rx_mode = 0;

	if (netif_msg_rx_status(sp))
		printk(KERN_DEBUG "%s: set_rx_mode %d -> %d\n", dev->name,
		       sp->rx_mode, new_rx_mode);

	if ((int)(sp->cur_tx - sp->dirty_tx) > TX_RING_SIZE - TX_MULTICAST_SIZE) {
		/* The Tx ring is full -- don't add anything!  Hope the mode will be
		 * set again later. */
		sp->rx_mode = -1;
		return;
	}
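
	/* new_rx_mode is a bitmask: bit 1 selects promiscuous reception (wired
	   into config byte 15 below) and bit 0 selects receive-all-multicast
	   (config byte 21), so 3 = promiscuous, 1 = all-multicast and
	   0 = normal filtering. */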
	if (new_rx_mode != sp->rx_mode) {
		u8 *config_cmd_data;

		spin_lock_irqsave(&sp->lock, flags);
		entry = sp->cur_tx++ % TX_RING_SIZE;
		last_cmd = sp->last_cmd;
		sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];

		sp->tx_skbuff[entry] = NULL;	/* Redundant. */
		sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdConfigure);
		sp->tx_ring[entry].link =
			cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
		config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr;
		/* Construct a full CmdConfig frame. */
		memcpy(config_cmd_data, i82558_config_cmd, CONFIG_DATA_SIZE);
		config_cmd_data[1] = (txfifo << 4) | rxfifo;
		config_cmd_data[4] = rxdmacount;
		config_cmd_data[5] = txdmacount + 0x80;
		config_cmd_data[15] |= (new_rx_mode & 2) ? 1 : 0;
		/* 0x80 doesn't disable FC; 0x84 does.
		   Disable Flow control since we are not ACK-ing any FC interrupts
		   for now.  --Dragan */
		config_cmd_data[19] = 0x84;
		config_cmd_data[19] |= sp->mii_if.full_duplex ? 0x40 : 0;
		config_cmd_data[21] = (new_rx_mode & 1) ? 0x0D : 0x05;
		if (sp->phy[0] & 0x8000) {	/* Use the AUI port instead. */
			config_cmd_data[15] |= 0x80;
			config_cmd_data[8] = 0;
		}
		/* Trigger the command unit resume. */
		wait_for_cmd_done(dev, sp);
		clear_suspend(last_cmd);
		iowrite8(CUResume, ioaddr + SCBCmd);
		if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
			netif_stop_queue(dev);
			sp->tx_full = 1;
		}
		spin_unlock_irqrestore(&sp->lock, flags);
	}
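
	/* With fewer than four multicast addresses, the setup data (a 2-byte
	   count plus three 6-byte addresses) is small enough to overlay the
	   unused descriptor fields of a single tx_ring[] entry, avoiding a
	   separately allocated out-of-ring setup frame. */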
	if (new_rx_mode == 0 && dev->mc_count < 4) {
		/* The simple case of 0-3 multicast list entries occurs often, and
		   fits within one tx_ring[] entry. */
		struct dev_mc_list *mclist;
		u16 *setup_params, *eaddrs;

		spin_lock_irqsave(&sp->lock, flags);
		entry = sp->cur_tx++ % TX_RING_SIZE;
		last_cmd = sp->last_cmd;
		sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];

		sp->tx_skbuff[entry] = NULL;
		sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdMulticastList);
		sp->tx_ring[entry].link =
			cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
		sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */
		setup_params = (u16 *)&sp->tx_ring[entry].tx_desc_addr;
		*setup_params++ = cpu_to_le16(dev->mc_count*6);
		/* Fill in the multicast addresses. */
		for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
		     i++, mclist = mclist->next) {
			eaddrs = (u16 *)mclist->dmi_addr;
			*setup_params++ = *eaddrs++;
			*setup_params++ = *eaddrs++;
			*setup_params++ = *eaddrs++;
		}

		wait_for_cmd_done(dev, sp);
		clear_suspend(last_cmd);
		/* Immediately trigger the command unit resume. */
		iowrite8(CUResume, ioaddr + SCBCmd);

		if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
			netif_stop_queue(dev);
			sp->tx_full = 1;
		}
		spin_unlock_irqrestore(&sp->lock, flags);
	} else if (new_rx_mode == 0) {
		struct dev_mc_list *mclist;
		u16 *setup_params, *eaddrs;
		struct speedo_mc_block *mc_blk;
		struct descriptor *mc_setup_frm;
		int i;

		mc_blk = kmalloc(sizeof(*mc_blk) + 2 + multicast_filter_limit*6,
				 GFP_ATOMIC);
		if (mc_blk == NULL) {
			printk(KERN_ERR "%s: Failed to allocate a setup frame.\n",
			       dev->name);
			sp->rx_mode = -1; /* We failed, try again. */
			return;
		}
		mc_blk->next = NULL;
		mc_blk->len = 2 + multicast_filter_limit*6;
		mc_blk->frame_dma =
			pci_map_single(sp->pdev, &mc_blk->frame, mc_blk->len,
				       PCI_DMA_TODEVICE);
		mc_setup_frm = &mc_blk->frame;

		/* Fill the setup frame. */
		if (netif_msg_ifup(sp))
			printk(KERN_DEBUG "%s: Constructing a setup frame at %p.\n",
			       dev->name, mc_setup_frm);
		mc_setup_frm->cmd_status =
			cpu_to_le32(CmdSuspend | CmdIntr | CmdMulticastList);
		/* Link set below. */
		setup_params = (u16 *)&mc_setup_frm->params;
		*setup_params++ = cpu_to_le16(dev->mc_count*6);
		/* Fill in the multicast addresses. */
		for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
		     i++, mclist = mclist->next) {
			eaddrs = (u16 *)mclist->dmi_addr;
			*setup_params++ = *eaddrs++;
			*setup_params++ = *eaddrs++;
			*setup_params++ = *eaddrs++;
		}

		/* Disable interrupts while playing with the Tx Cmd list. */
		spin_lock_irqsave(&sp->lock, flags);

		if (sp->mc_setup_tail)
			sp->mc_setup_tail->next = mc_blk;
		else
			sp->mc_setup_head = mc_blk;
		sp->mc_setup_tail = mc_blk;
		mc_blk->tx = sp->cur_tx;

		entry = sp->cur_tx++ % TX_RING_SIZE;
		last_cmd = sp->last_cmd;
		sp->last_cmd = mc_setup_frm;

		/* Change the command to a NoOp, pointing to the CmdMulti command. */
		sp->tx_skbuff[entry] = NULL;
		sp->tx_ring[entry].status = cpu_to_le32(CmdNOp);
		sp->tx_ring[entry].link = cpu_to_le32(mc_blk->frame_dma);

		/* Set the link in the setup frame. */
		mc_setup_frm->link =
			cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));

		pci_dma_sync_single_for_device(sp->pdev, mc_blk->frame_dma,
					       mc_blk->len, PCI_DMA_TODEVICE);

		wait_for_cmd_done(dev, sp);
		clear_suspend(last_cmd);
		/* Immediately trigger the command unit resume. */
		iowrite8(CUResume, ioaddr + SCBCmd);

		if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
			netif_stop_queue(dev);
			sp->tx_full = 1;
		}
		spin_unlock_irqrestore(&sp->lock, flags);

		if (netif_msg_rx_status(sp))
			printk(" CmdMCSetup frame length %d in entry %d.\n",
			       dev->mc_count, entry);
	}

	sp->rx_mode = new_rx_mode;
}
#ifdef CONFIG_PM
static int eepro100_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct speedo_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->regs;

	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	del_timer_sync(&sp->timer);

	netif_device_detach(dev);
	iowrite32(PortPartialReset, ioaddr + SCBPort);

	/* XXX call pci_set_power_state ()? */
	pci_disable_device(pdev);
	pci_set_power_state (pdev, PCI_D3hot);
	return 0;
}
static int eepro100_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct speedo_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->regs;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_device(pdev);
	pci_set_master(pdev);

	if (!netif_running(dev))
		return 0;

	/* I'm absolutely uncertain whether this part of the code works.
	   The problems are:
	    - correct hardware reinitialization;
	    - correct driver behavior between different steps of the
	      reinitialization;
	    - serialization with other driver calls.
	   2000/03/08  SAW */
	iowrite16(SCBMaskAll, ioaddr + SCBCmd);
	speedo_resume(dev);
	netif_device_attach(dev);
	sp->rx_mode = -1;
	sp->flow_ctrl = sp->partner = 0;
	set_rx_mode(dev);
	sp->timer.expires = RUN_AT(2*HZ);
	add_timer(&sp->timer);
	return 0;
}
#endif /* CONFIG_PM */
static void __devexit eepro100_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct speedo_private *sp = netdev_priv(dev);

	unregister_netdev(dev);

	release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
	release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));

	pci_iounmap(pdev, sp->regs);
	pci_free_consistent(pdev, TX_RING_SIZE * sizeof(struct TxFD)
				  + sizeof(struct speedo_stats),
			    sp->tx_ring, sp->tx_ring_dma);
	pci_disable_device(pdev);
	free_netdev(dev);
}
static struct pci_device_id eepro100_pci_tbl[] = {
	{ PCI_VENDOR_ID_INTEL, 0x1229, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x1209, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x1029, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x1030, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x1031, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x1032, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x1033, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x1034, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x1035, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x1036, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x1037, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x1038, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x1039, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x103A, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x103B, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x103C, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x103D, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x103E, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x1050, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x1059, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x1227, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x2449, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x2459, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x245D, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x5200, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTEL, 0x5201, PCI_ANY_ID, PCI_ANY_ID, },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, eepro100_pci_tbl);
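
/* MODULE_DEVICE_TABLE() exports the PCI ID list above so that hotplug
   tooling can load this module automatically when a matching i8255x
   device is found. */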
static struct pci_driver eepro100_driver = {
	.name		= "eepro100",
	.id_table	= eepro100_pci_tbl,
	.probe		= eepro100_init_one,
	.remove		= __devexit_p(eepro100_remove_one),
#ifdef CONFIG_PM
	.suspend	= eepro100_suspend,
	.resume		= eepro100_resume,
#endif /* CONFIG_PM */
};
static int __init eepro100_init_module(void)
{
#ifdef MODULE
	printk(version);
#endif
	return pci_module_init(&eepro100_driver);
}
static void __exit eepro100_cleanup_module(void)
{
	pci_unregister_driver(&eepro100_driver);
}

module_init(eepro100_init_module);
module_exit(eepro100_cleanup_module);
/*
 * Local variables:
 *  compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  tab-width: 4
 * End:
 */