/* drivers/net/eepro100.c: An Intel i82557-559 Ethernet driver for Linux. */
/*
   Written 1996-1999 by Donald Becker.

   The driver also contains updates by different kernel developers
   (see incomplete list below).
   Current maintainer is Andrey V. Savochkin <saw@saw.sw.com.sg>.
   Please use this email address and linux-kernel mailing list for bug reports.

   This software may be used and distributed according to the terms
   of the GNU General Public License, incorporated herein by reference.

   This driver is for the Intel EtherExpress Pro100 (Speedo3) design.
   It should work with all i82557/558/559 boards.

   Version history:
   1998 Apr - 2000 Feb  Andrey V. Savochkin <saw@saw.sw.com.sg>
	Serious fixes for multicast filter list setting, TX timeout routine;
	RX ring refilling logic;  other stuff
   2000 Feb  Jeff Garzik <jgarzik@pobox.com>
	Convert to new PCI driver interface
   2000 Mar 24  Dragan Stancevic <visitor@valinux.com>
	Disabled FC and ER, to avoid lockups when we get FCP interrupts.
   2000 Jul 17 Goutham Rao <goutham.rao@intel.com>
	PCI DMA API fixes, adding pci_dma_sync_single calls where necessary
   2000 Aug 31 David Mosberger <davidm@hpl.hp.com>
	rx_align support: enables rx DMA without causing unaligned accesses.
*/

static const char *version =
"eepro100.c:v1.09j-t 9/29/99 Donald Becker http://www.scyld.com/network/eepro100.html\n"
"eepro100.c: $Revision: 1.36 $ 2000/11/17 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n";
/* A few user-configurable values that apply to all boards.
   First set is undocumented and spelled per Intel recommendations. */

static int congenb /* = 0 */;	/* Enable congestion control in the DP83840. */
static int txfifo = 8;		/* Tx FIFO threshold in 4 byte units, 0-15 */
static int rxfifo = 8;		/* Rx FIFO threshold, default 32 bytes. */
/* Tx/Rx DMA burst length, 0-127, 0 == no preemption, tx==128 -> disabled. */
static int txdmacount = 128;
static int rxdmacount /* = 0 */;
#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) || defined(__mips__) || \
	defined(__arm__)
  /* align rx buffers to 2 bytes so that IP header is aligned */
# define rx_align(skb)		skb_reserve((skb), 2)
# define RxFD_ALIGNMENT		__attribute__ ((aligned (2), packed))
#else
# define rx_align(skb)
# define RxFD_ALIGNMENT
#endif
/* Set the copy breakpoint for the copy-only-tiny-buffer Rx method.
   Lower values use more memory, but are faster. */
static int rx_copybreak = 200;

/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast) */
static int multicast_filter_limit = 64;

/* 'options' is used to pass a transceiver override or full-duplex flag
   e.g. "options=16" for FD, "options=32" for 100mbps-only. */
static int full_duplex[] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1};
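
/* Usage sketch (assuming a module build; the values are examples only):
	modprobe eepro100 options=0x30,0x10
   forces 100Mbps full-duplex on the first board (bit 5 | bit 4) and
   full-duplex at the negotiated speed on the second. */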
/* A few values that may be tweaked. */
/* The ring sizes should be a power of two for efficiency. */
#define TX_RING_SIZE	64
#define RX_RING_SIZE	64
/* How many slots multicast filter setup may take.
   Do not decrease without changing set_rx_mode() implementation. */
#define TX_MULTICAST_SIZE   2
#define TX_MULTICAST_RESERV (TX_MULTICAST_SIZE*2)
/* Actual number of TX packets queued, must be
   <= TX_RING_SIZE-TX_MULTICAST_RESERV. */
#define TX_QUEUE_LIMIT  (TX_RING_SIZE-TX_MULTICAST_RESERV)
/* Hysteresis marking queue as no longer full. */
#define TX_QUEUE_UNFULL (TX_QUEUE_LIMIT-4)
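/* Worked out with the defaults above: TX_MULTICAST_RESERV = 4, so at most
   TX_QUEUE_LIMIT = 64 - 4 = 60 packets may be queued, and the queue is
   re-enabled once fewer than TX_QUEUE_UNFULL = 56 remain outstanding. */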
/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT		(2*HZ)
/* Size of a pre-allocated Rx buffer: <Ethernet MTU> + slack.*/
#define PKT_BUF_SZ		1536

#if !defined(__OPTIMIZE__)  ||  !defined(__KERNEL__)
#warning  You must compile this file with the correct options!
#warning  See the last lines of the source file.
#error You must compile this driver with "-O".
#endif
#include <linux/config.h>
#include <linux/version.h>
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/delay.h>

#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/uaccess.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
/* enable PIO instead of MMIO, if CONFIG_EEPRO100_PIO is selected */
#ifdef CONFIG_EEPRO100_PIO
#define USE_IO 1
#endif

static int debug = -1;
#define DEBUG_DEFAULT		(NETIF_MSG_DRV		| \
				 NETIF_MSG_HW		| \
				 NETIF_MSG_RX_ERR	| \
				 NETIF_MSG_TX_ERR)
#define DEBUG			((debug >= 0) ? (1<<debug)-1 : DEBUG_DEFAULT)
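/* Worked example: debug=3 yields (1<<3)-1 = 0x7, i.e.
   NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK. */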
MODULE_AUTHOR("Maintainer: Andrey V. Savochkin <saw@saw.sw.com.sg>");
MODULE_DESCRIPTION("Intel i82557/i82558/i82559 PCI EtherExpressPro driver");
MODULE_LICENSE("GPL");
MODULE_PARM(debug, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(congenb, "i");
MODULE_PARM(txfifo, "i");
MODULE_PARM(rxfifo, "i");
MODULE_PARM(txdmacount, "i");
MODULE_PARM(rxdmacount, "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(multicast_filter_limit, "i");
MODULE_PARM_DESC(debug, "debug level (0-6)");
MODULE_PARM_DESC(options, "Bits 0-3: transceiver type, bit 4: full duplex, bit 5: 100Mbps");
MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)");
MODULE_PARM_DESC(congenb, "Enable congestion control (1)");
MODULE_PARM_DESC(txfifo, "Tx FIFO threshold in 4 byte units, (0-15)");
MODULE_PARM_DESC(rxfifo, "Rx FIFO threshold in 4 byte units, (0-15)");
MODULE_PARM_DESC(txdmacount, "Tx DMA burst length; 128 - disable (0-128)");
MODULE_PARM_DESC(rxdmacount, "Rx DMA burst length; 128 - disable (0-128)");
MODULE_PARM_DESC(rx_copybreak, "copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(max_interrupt_work, "maximum events handled per interrupt");
MODULE_PARM_DESC(multicast_filter_limit, "maximum number of filtered multicast addresses");
#define RUN_AT(x) (jiffies + (x))

/* ACPI power states don't universally work (yet) */
#ifndef CONFIG_PM
#undef pci_set_power_state
#define pci_set_power_state null_set_power_state
static inline int null_set_power_state(struct pci_dev *dev, int state)
{
	return 0;
}
#endif /* CONFIG_PM */

#define netdevice_start(dev)
#define netdevice_stop(dev)
#define netif_set_tx_timeout(dev, tf, tm) \
	do { \
		(dev)->tx_timeout = (tf); \
		(dev)->watchdog_timeo = (tm); \
	} while(0)
/*
				Theory of Operation

I. Board Compatibility

This device driver is designed for the Intel i82557 "Speedo3" chip, Intel's
single-chip fast Ethernet controller for PCI, as used on the Intel
EtherExpress Pro 100 adapter.

II. Board-specific settings

PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board.  The system BIOS should be set to assign the
PCI INTA signal to an otherwise unused system IRQ line.  While it's
possible to share PCI interrupt lines, it negatively impacts performance and
only recent kernels support it.

III. Driver operation

IIIA. General
The Speedo3 is very similar to other Intel network chips, that is to say
"apparently designed on a different planet".  This chip retains the complex
Rx and Tx descriptors and multiple buffer pointers as previous chips, but
also has simplified Tx and Rx buffer modes.  This driver uses the "flexible"
Tx mode, but in a simplified lower-overhead manner: it associates only a
single buffer descriptor with each frame descriptor.
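
A sketch of that association (field names from struct TxFD below; the
arrows are DMA pointers):

	TxFD ring entry:
		status		command/status word
		link		---> next TxFD in the ring
		tx_desc_addr	---> this entry's own tx_buf_addr0 field
		count		number of TBDs (= 1), Tx threshold, etc.
		tx_buf_addr0	---> skb->data, the frame to transmit
		tx_buf_size0	length of the frame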
Despite the extra space overhead in each receive skbuff, the driver must use
the simplified Rx buffer mode to assure that only a single data buffer is
associated with each RxFD. The driver implements this by reserving space
for the Rx descriptor at the head of each Rx skbuff.

The Speedo-3 has receive and command unit base addresses that are added to
almost all descriptor pointers.  The driver sets these to zero, so that all
pointer fields are absolute addresses.

The System Control Block (SCB) of some previous Intel chips exists on the
chip in both PCI I/O and memory space.  This driver uses the I/O space
registers, but might switch to memory mapped mode to better support non-x86
processors.

IIIB. Transmit structure

The driver must use the complex Tx command+descriptor mode in order to
have an indirect pointer to the skbuff data section.  Each Tx command block
(TxCB) is associated with two immediately appended Tx Buffer Descriptors
(TxBDs).  A fixed ring of these TxCB+TxBD pairs is kept as part of the
speedo_private data structure for each adapter instance.

The newer i82558 explicitly supports this structure, and can read the two
TxBDs in the same PCI burst as the TxCB.

This ring structure is used for all normal transmit packets, but the
transmit packet descriptors aren't long enough for most non-Tx commands such
as CmdConfigure.  This is complicated by the possibility that the chip has
already loaded the link address in the previous descriptor.  So for these
commands we convert the next free descriptor on the ring to a NoOp, and point
that descriptor's link to the complex command.
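
Schematically (a sketch of the idea, not the literal code):

	desc = next free tx_ring entry, converted to a no-op;
	desc->cmd_status = CmdSuspend | CmdNOp;
	desc->link = DMA address of the out-of-ring command block;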
An additional complexity of these non-transmit commands is that they may be
added asynchronously to the normal transmit queue, so we disable interrupts
whenever the Tx descriptor ring is manipulated.

A notable aspect of these special configure commands is that they do
work with the normal Tx ring entry scavenge method.  The Tx ring scavenge
is done at interrupt time using the 'dirty_tx' index, and checking for the
command-complete bit.  The setup frames may have the NoOp command on the
Tx ring marked as complete while the setup command itself has not yet
completed, but this is not a problem.  The tx_ring entry can still be safely
reused, as the tx_skbuff[] entry is always empty for config_cmd and mc_setup
frames.

Commands may have bits set, e.g. CmdSuspend, in the command word to either
suspend or stop the transmit/command unit.  This driver always flags the last
command with CmdSuspend, erases the CmdSuspend in the previous command, and
then issues a CU_RESUME.
Note: Watch out for the potential race condition here: imagine
	erasing the previous suspend
		the chip processes the previous command
		the chip processes the final command, and suspends
	doing the CU_RESUME
		the chip processes the next-yet-valid post-final-command.
So blindly sending a CU_RESUME is only safe if we do it immediately after
erasing the previous CmdSuspend, without the possibility of an intervening
delay.  Thus the resume command is always within the interrupts-disabled
region.  This is a timing dependence, but handling this condition in a
timing-independent way would considerably complicate the code.
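
In code form, the resume sequence (as used by speedo_start_xmit() below)
is essentially:

	spin_lock_irqsave(&sp->lock, flags);
	wait_for_cmd_done(dev);
	clear_suspend(sp->last_cmd);	   erase CmdSuspend in prior command
	outb(CUResume, ioaddr + SCBCmd);   resume before a delay can intervene
	sp->last_cmd = new_cmd;
	spin_unlock_irqrestore(&sp->lock, flags);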
Note: In previous generation Intel chips, restarting the command unit was a
notoriously slow process.  This is presumably no longer true.

IIIC. Receive structure

Because of the bus-master support on the Speedo3 this driver uses the new
SKBUFF_RX_COPYBREAK scheme, rather than a fixed intermediate receive buffer.
This scheme allocates full-sized skbuffs as receive buffers.  The value
SKBUFF_RX_COPYBREAK is used as the copying breakpoint: it is chosen to
trade-off the memory wasted by passing the full-sized skbuff to the queue
layer for all frames vs. the copying cost of copying a frame to a
correctly-sized skbuff.

For small frames the copying cost is negligible (esp. considering that we
are pre-loading the cache with immediately useful header information), so we
allocate a new, minimally-sized skbuff.  For large frames the copying cost
is non-trivial, and the larger copy might flush the cache of useful data, so
we pass up the skbuff the packet was received into.
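
In code form the breakpoint test is essentially (cf. rx_copybreak above):

	if (pkt_len < rx_copybreak) {
		copy the frame into a fresh, minimally-sized skbuff and
		leave the full-sized receive buffer in place;
	} else {
		pass the receive skbuff itself up and refill the ring slot;
	}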
IV. Notes

Thanks to Steve Williams of Intel for arranging the non-disclosure agreement
that stated that I could disclose the information.  But I still resent
having to sign an Intel NDA when I'm helping Intel sell their own product!
*/
static int speedo_found1(struct pci_dev *pdev, long ioaddr, int fnd_cnt, int acpi_idle_state);

enum pci_flags_bit {
	PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
	PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
};
static inline unsigned int io_inw(unsigned long port)
{
	return inw(port);
}
static inline void io_outw(unsigned int val, unsigned long port)
{
	outw(val, port);
}
#ifndef USE_IO
/* Currently alpha headers define in/out macros.
   Undefine them.  2000/03/30  SAW */
#undef inb
#undef inw
#undef inl
#undef outb
#undef outw
#undef outl
#define inb readb
#define inw readw
#define inl readl
#define outb writeb
#define outw writew
#define outl writel
#endif
/* Offsets to the various registers.
   All accesses need not be longword aligned. */
enum speedo_offsets {
	SCBStatus = 0, SCBCmd = 2,	/* Rx/Command Unit command and status. */
	SCBIntmask = 3,
	SCBPointer = 4,			/* General purpose pointer. */
	SCBPort = 8,			/* Misc. commands and operands.  */
	SCBflash = 12, SCBeeprom = 14,	/* EEPROM and flash memory control. */
	SCBCtrlMDI = 16,		/* MDI interface control. */
	SCBEarlyRx = 20,		/* Early receive byte count. */
};
/* Commands that can be put in a command list entry. */
enum commands {
	CmdNOp = 0, CmdIASetup = 0x10000, CmdConfigure = 0x20000,
	CmdMulticastList = 0x30000, CmdTx = 0x40000, CmdTDR = 0x50000,
	CmdDump = 0x60000, CmdDiagnose = 0x70000,
	CmdSuspend = 0x40000000,	/* Suspend after completion. */
	CmdIntr = 0x20000000,		/* Interrupt after completion. */
	CmdTxFlex = 0x00080000,		/* Use "Flexible mode" for CmdTx command. */
};
/* Clear CmdSuspend (1<<30) avoiding interference with the card access to the
   status bits.  Previous driver versions used separate 16 bit fields for
   commands and statuses.  --SAW
 */
#if defined(__alpha__)
# define clear_suspend(cmd)  clear_bit(30, &(cmd)->cmd_status);
#else
# if defined(__LITTLE_ENDIAN)
#  define clear_suspend(cmd)	((__u16 *)&(cmd)->cmd_status)[1] &= ~0x4000
# elif defined(__BIG_ENDIAN)
#  define clear_suspend(cmd)	((__u16 *)&(cmd)->cmd_status)[1] &= ~0x0040
# else
#  error Unsupported byteorder
# endif
#endif
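
/* Worked example of the two masks: cmd_status is kept little-endian for the
   chip, so bit 30 sits in byte 3, bit 6.  Halfword [1] covers bytes 2-3;
   read as a native u16 that bit is 0x4000 on little-endian CPUs and 0x0040
   on big-endian ones, exactly the masks cleared above. */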
enum SCBCmdBits {
	SCBMaskCmdDone=0x8000, SCBMaskRxDone=0x4000, SCBMaskCmdIdle=0x2000,
	SCBMaskRxSuspend=0x1000, SCBMaskEarlyRx=0x0800, SCBMaskFlowCtl=0x0400,
	SCBTriggerIntr=0x0200, SCBMaskAll=0x0100,
	/* The rest are Rx and Tx commands. */
	CUStart=0x0010, CUResume=0x0020, CUStatsAddr=0x0040, CUShowStats=0x0050,
	CUCmdBase=0x0060,	/* CU Base address (set to zero) . */
	CUDumpStats=0x0070,	/* Dump then reset stats counters. */
	RxStart=0x0001, RxResume=0x0002, RxAbort=0x0004, RxAddrLoad=0x0006,
	RxResumeNoResources=0x0007,
};

enum SCBPort_cmds {
	PortReset=0, PortSelfTest=1, PortPartialReset=2, PortDump=3,
};
/* The Speedo3 Rx and Tx frame/buffer descriptors. */
struct descriptor {			/* A generic descriptor. */
	volatile s32 cmd_status;	/* All command and status fields. */
	u32 link;			/* struct descriptor *  */
	unsigned char params[0];
};

/* The Speedo3 Rx and Tx buffer descriptors. */
struct RxFD {				/* Receive frame descriptor. */
	volatile s32 status;
	u32 link;			/* struct RxFD * */
	u32 rx_buf_addr;		/* void * */
	u32 count;
} RxFD_ALIGNMENT;
/* Selected elements of the Tx/RxFD.status word. */
enum RxFD_bits {
	RxComplete=0x8000, RxOK=0x2000,
	RxErrCRC=0x0800, RxErrAlign=0x0400, RxErrTooBig=0x0200, RxErrSymbol=0x0010,
	RxEth2Type=0x0020, RxNoMatch=0x0004, RxNoIAMatch=0x0002,
	TxUnderrun=0x1000,  StatusComplete=0x8000,
};

#define CONFIG_DATA_SIZE 22
struct TxFD {				/* Transmit frame descriptor set. */
	s32 status;
	u32 link;			/* void * */
	u32 tx_desc_addr;		/* Always points to the tx_buf_addr element. */
	s32 count;			/* # of TBD (=1), Tx start thresh., etc. */
	/* This constitutes two "TBD" entries -- we only use one. */
#define TX_DESCR_BUF_OFFSET 16
	u32 tx_buf_addr0;		/* void *, frame to be transmitted.  */
	s32 tx_buf_size0;		/* Length of Tx frame. */
	u32 tx_buf_addr1;		/* void *, frame to be transmitted.  */
	s32 tx_buf_size1;		/* Length of Tx frame. */
	/* the structure must have space for at least CONFIG_DATA_SIZE starting
	 * from tx_desc_addr field */
};
/* Multicast filter setting block.  --SAW */
struct speedo_mc_block {
	struct speedo_mc_block *next;
	unsigned int tx;
	dma_addr_t frame_dma;
	unsigned int len;
	struct descriptor frame __attribute__ ((__aligned__(16)));
};
/* Elements of the dump_statistics block. This block must be lword aligned. */
struct speedo_stats {
	u32 tx_good_frames;
	u32 tx_coll16_errs;
	u32 tx_late_colls;
	u32 tx_underruns;
	u32 tx_lost_carrier;
	u32 tx_deferred;
	u32 tx_one_colls;
	u32 tx_multi_colls;
	u32 tx_total_colls;
	u32 rx_good_frames;
	u32 rx_crc_errs;
	u32 rx_align_errs;
	u32 rx_resource_errs;
	u32 rx_overrun_errs;
	u32 rx_colls_errs;
	u32 rx_runt_errs;
	u32 done_marker;
};

enum Rx_ring_state_bits {
	RrNoMem=1, RrPostponed=2, RrNoResources=4, RrOOMReported=8,
};
/* Do not change the position (alignment) of the first few elements!
   The later elements are grouped for cache locality.

   Unfortunately, all the positions have been shifted since then.
   A new re-alignment is required.  2000/03/06  SAW */
struct speedo_private {
	struct TxFD *tx_ring;		/* Commands (usually CmdTxPacket). */
	struct RxFD *rx_ringp[RX_RING_SIZE];	/* Rx descriptor, used as ring. */
	/* The addresses of a Tx/Rx-in-place packets/buffers. */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	/* Mapped addresses of the rings. */
	dma_addr_t tx_ring_dma;
#define TX_RING_ELEM_DMA(sp, n) ((sp)->tx_ring_dma + (n)*sizeof(struct TxFD))
	dma_addr_t rx_ring_dma[RX_RING_SIZE];
	struct descriptor *last_cmd;	/* Last command sent. */
	unsigned int cur_tx, dirty_tx;	/* The ring entries to be free()ed. */
	spinlock_t lock;		/* Group with Tx control cache line. */
	u32 tx_threshold;		/* The value for txdesc.count. */
	struct RxFD *last_rxf;		/* Last filled RX buffer. */
	dma_addr_t last_rxf_dma;
	unsigned int cur_rx, dirty_rx;	/* The next free ring entry */
	long last_rx_time;		/* Last Rx, in jiffies, to handle Rx hang. */
	struct net_device_stats stats;
	struct speedo_stats *lstats;
	dma_addr_t lstats_dma;
	int chip_id;
	struct pci_dev *pdev;
	struct timer_list timer;	/* Media selection timer. */
	struct speedo_mc_block *mc_setup_head;	/* Multicast setup frame list head. */
	struct speedo_mc_block *mc_setup_tail;	/* Multicast setup frame list tail. */
	long in_interrupt;		/* Word-aligned dev->interrupt */
	unsigned char acpi_pwr;
	signed char rx_mode;		/* Current PROMISC/ALLMULTI setting. */
	unsigned int tx_full:1;		/* The Tx queue is full. */
	unsigned int flow_ctrl:1;	/* Use 802.3x flow control. */
	unsigned int rx_bug:1;		/* Work around receiver hang errata. */
	unsigned char default_port:8;	/* Last dev->if_port value. */
	unsigned char rx_ring_state;	/* RX ring status flags. */
	unsigned short phy[2];		/* PHY media interfaces available. */
	unsigned short partner;		/* Link partner caps. */
	struct mii_if_info mii_if;	/* MII API hooks, info */
	u32 msg_enable;			/* debug message level */
#ifdef CONFIG_PM
	u32 pm_state[16];
#endif
};
/* The parameters for a CmdConfigure operation.
   There are so many options that it would be difficult to document each bit.
   We mostly use the default or recommended settings. */
static const char i82557_config_cmd[CONFIG_DATA_SIZE] = {
	22, 0x08, 0, 0,  0, 0, 0x32, 0x03,  1, /* 1=Use MII  0=Use AUI */
	0, 0x2E, 0,  0x60, 0,
	0xf2, 0x48,   0, 0x40, 0xf2, 0x80,	/* 0x40=Force full-duplex */
	0x3f, 0x05, };
static const char i82558_config_cmd[CONFIG_DATA_SIZE] = {
	22, 0x08, 0, 1,  0, 0, 0x22, 0x03,  1, /* 1=Use MII  0=Use AUI */
	0, 0x2E, 0,  0x60, 0x08, 0x88,
	0x68, 0, 0x40, 0xf2, 0x84,		/* Disable FC */
	0x31, 0x05, };
/* PHY media interface chips. */
static const char *phys[] = {
	"None", "i82553-A/B", "i82553-C", "i82503",
	"DP83840", "80c240", "80c24", "i82555",
	"unknown-8", "unknown-9", "DP83840A", "unknown-11",
	"unknown-12", "unknown-13", "unknown-14", "unknown-15", };
enum phy_chips { NonSuchPhy=0, I82553AB, I82553C, I82503, DP83840, S80C240,
		 S80C24, I82555, DP83840A=10, };
static const char is_mii[] = { 0, 1, 1, 0, 1, 1, 0, 1 };
#define EE_READ_CMD		(6)
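/* Sketch of how this opcode is used below (see speedo_found1() and
   do_eeprom_cmd()): EEPROMs with 8 address bits take EE_READ_CMD << 24,
   those with 6 address bits take EE_READ_CMD << 22, and the word address
   is OR'ed in at bit 16 before the 27-bit sequence is clocked out. */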
static int eepro100_init_one(struct pci_dev *pdev,
		const struct pci_device_id *ent);

static int do_eeprom_cmd(long ioaddr, int cmd, int cmd_len);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int speedo_open(struct net_device *dev);
static void speedo_resume(struct net_device *dev);
static void speedo_timer(unsigned long data);
static void speedo_init_rx_ring(struct net_device *dev);
static void speedo_tx_timeout(struct net_device *dev);
static int speedo_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void speedo_refill_rx_buffers(struct net_device *dev, int force);
static int speedo_rx(struct net_device *dev);
static void speedo_tx_buffer_gc(struct net_device *dev);
static irqreturn_t speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static int speedo_close(struct net_device *dev);
static struct net_device_stats *speedo_get_stats(struct net_device *dev);
static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void set_rx_mode(struct net_device *dev);
static void speedo_show_state(struct net_device *dev);
#ifdef honor_default_port
/* Optional driver feature to allow forcing the transceiver setting.
   Not recommended. */
static int mii_ctrl[8] = { 0x3300, 0x3100, 0x0000, 0x0100,
			   0x2000, 0x2100, 0x0400, 0x3100};
#endif
/* How to wait for the command unit to accept a command.
   Typically this takes 0 ticks. */
static inline unsigned char wait_for_cmd_done(struct net_device *dev)
{
	int wait = 1000;
	long cmd_ioaddr = dev->base_addr + SCBCmd;
	unsigned char r;

	do {
		udelay(1);
		r = inb(cmd_ioaddr);
	} while(r && --wait >= 0);

	if (wait < 0)
		printk(KERN_ALERT "%s: wait_for_cmd_done timeout!\n", dev->name);
	return r;
}
static int __devinit eepro100_init_one (struct pci_dev *pdev,
		const struct pci_device_id *ent)
{
	unsigned long ioaddr;
	int irq;
	int acpi_idle_state = 0, pm;
	static int cards_found /* = 0 */;

#ifndef MODULE
	/* when built-in, we only print version if device is found */
	static int did_version;
	if (did_version++ == 0)
		printk(version);
#endif

	/* save power state before pci_enable_device overwrites it */
	pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm) {
		u16 pwr_command;
		pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pwr_command);
		acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
	}

	if (pci_enable_device(pdev))
		goto err_out_free_mmio_region;

	pci_set_master(pdev);

	if (!request_region(pci_resource_start(pdev, 1),
			pci_resource_len(pdev, 1), "eepro100")) {
		printk (KERN_ERR "eepro100: cannot reserve I/O ports\n");
		goto err_out_none;
	}
	if (!request_mem_region(pci_resource_start(pdev, 0),
			pci_resource_len(pdev, 0), "eepro100")) {
		printk (KERN_ERR "eepro100: cannot reserve MMIO region\n");
		goto err_out_free_pio_region;
	}

	irq = pdev->irq;
#ifdef USE_IO
	ioaddr = pci_resource_start(pdev, 1);
	if (DEBUG & NETIF_MSG_PROBE)
		printk("Found Intel i82557 PCI Speedo at I/O %#lx, IRQ %d.\n",
			   ioaddr, irq);
#else
	ioaddr = (unsigned long)ioremap(pci_resource_start(pdev, 0),
					pci_resource_len(pdev, 0));
	if (!ioaddr) {
		printk (KERN_ERR "eepro100: cannot remap MMIO region %lx @ %lx\n",
				pci_resource_len(pdev, 0), pci_resource_start(pdev, 0));
		goto err_out_free_mmio_region;
	}
	if (DEBUG & NETIF_MSG_PROBE)
		printk("Found Intel i82557 PCI Speedo, MMIO at %#lx, IRQ %d.\n",
			   pci_resource_start(pdev, 0), irq);
#endif

	if (speedo_found1(pdev, ioaddr, cards_found, acpi_idle_state) == 0)
		cards_found++;
	else
		goto err_out_iounmap;

	return 0;

err_out_iounmap: ;
#ifndef USE_IO
	iounmap ((void *)ioaddr);
#endif
err_out_free_mmio_region:
	release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
err_out_free_pio_region:
	release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
err_out_none:
	return -ENODEV;
}
static int __devinit speedo_found1(struct pci_dev *pdev,
		long ioaddr, int card_idx, int acpi_idle_state)
{
	struct net_device *dev;
	struct speedo_private *sp;
	const char *product;
	int i, option;
	u16 eeprom[0x100];
	int size;
	void *tx_ring_space;
	dma_addr_t tx_ring_dma;

	size = TX_RING_SIZE * sizeof(struct TxFD) + sizeof(struct speedo_stats);
	tx_ring_space = pci_alloc_consistent(pdev, size, &tx_ring_dma);
	if (tx_ring_space == NULL)
		return -1;

	dev = alloc_etherdev(sizeof(struct speedo_private));
	if (dev == NULL) {
		printk(KERN_ERR "eepro100: Could not allocate ethernet device.\n");
		pci_free_consistent(pdev, size, tx_ring_space, tx_ring_dma);
		return -1;
	}
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (dev->mem_start > 0)
		option = dev->mem_start;
	else if (card_idx >= 0  &&  options[card_idx] >= 0)
		option = options[card_idx];
	else
		option = 0;

	rtnl_lock();
	if (dev_alloc_name(dev, dev->name) < 0)
		goto err_free_unlock;
	/* Read the station address EEPROM before doing the reset.
	   Nominally this should even be done before accepting the device, but
	   then we wouldn't have a device name with which to report the error.
	   The size test is for 6 bit vs. 8 bit address serial EEPROMs.
	*/
	{
		unsigned long iobase;
		int read_cmd, ee_size;
		u16 sum;
		int j;

		/* Use IO only to avoid postponed writes and satisfy EEPROM timing
		   requirements. */
		iobase = pci_resource_start(pdev, 1);
		if ((do_eeprom_cmd(iobase, EE_READ_CMD << 24, 27) & 0xffe0000)
			== 0xffe0000) {
			ee_size = 0x100;
			read_cmd = EE_READ_CMD << 24;
		} else {
			ee_size = 0x40;
			read_cmd = EE_READ_CMD << 22;
		}

		for (j = 0, i = 0, sum = 0; i < ee_size; i++) {
			u16 value = do_eeprom_cmd(iobase, read_cmd | (i << 16), 27);
			eeprom[i] = value;
			sum += value;
			if (i < 3) {
				dev->dev_addr[j++] = value;
				dev->dev_addr[j++] = value >> 8;
			}
		}
		if (sum != 0xBABA)
			printk(KERN_WARNING "%s: Invalid EEPROM checksum %#4.4x, "
				   "check settings before activating this device!\n",
				   dev->name, sum);
		/* Don't  unregister_netdev(dev);  as the EEPro may actually be
		   usable, especially if the MAC address is set later.
		   On the other hand, it may be unusable if MDI data is corrupted. */
	}
	/* Reset the chip: stop Tx and Rx processes and clear counters.
	   This takes less than 10usec and will easily finish before the next
	   action. */
	outl(PortReset, ioaddr + SCBPort);
	inl(ioaddr + SCBPort);
	udelay(10);

	if (eeprom[3] & 0x0100)
		product = "OEM i82557/i82558 10/100 Ethernet";
	else
		product = pdev->dev.name;

	printk(KERN_INFO "%s: %s, ", dev->name, product);

	for (i = 0; i < 5; i++)
		printk("%2.2X:", dev->dev_addr[i]);
	printk("%2.2X, ", dev->dev_addr[i]);
#ifdef USE_IO
	printk("I/O at %#3lx, ", ioaddr);
#endif
	printk("IRQ %d.\n", pdev->irq);

	/* we must initialize base_addr early, for mdio_{read,write} */
	dev->base_addr = ioaddr;
#if 1 || defined(kernel_bloat)
	/* OK, this is pure kernel bloat.  I don't like it when other drivers
	   waste non-pageable kernel space to emit similar messages, but I need
	   them for bug reports. */
	{
		const char *connectors[] = {" RJ45", " BNC", " AUI", " MII"};
		/* The self-test results must be paragraph aligned. */
		volatile s32 *self_test_results;
		int boguscnt = 16000;	/* Timeout for self-test. */
		if ((eeprom[3] & 0x03) != 0x03)
			printk(KERN_INFO "  Receiver lock-up bug exists -- enabling"
				   " work-around.\n");
		printk(KERN_INFO "  Board assembly %4.4x%2.2x-%3.3d, Physical"
			   " connectors present:",
			   eeprom[8], eeprom[9]>>8, eeprom[9] & 0xff);
		for (i = 0; i < 4; i++)
			if (eeprom[5] & (1<<i))
				printk(connectors[i]);
		printk("\n"KERN_INFO"  Primary interface chip %s PHY #%d.\n",
			   phys[(eeprom[6]>>8)&15], eeprom[6] & 0x1f);
		if (eeprom[7] & 0x0700)
			printk(KERN_INFO "    Secondary interface chip %s.\n",
				   phys[(eeprom[7]>>8)&7]);
		if (((eeprom[6]>>8) & 0x3f) == DP83840
			||  ((eeprom[6]>>8) & 0x3f) == DP83840A) {
			int mdi_reg23 = mdio_read(dev, eeprom[6] & 0x1f, 23) | 0x0422;
			if (congenb)
				mdi_reg23 |= 0x0100;
			printk(KERN_INFO"  DP83840 specific setup, setting register 23 to %4.4x.\n",
				   mdi_reg23);
			mdio_write(dev, eeprom[6] & 0x1f, 23, mdi_reg23);
		}
		if ((option >= 0) && (option & 0x70)) {
			printk(KERN_INFO "  Forcing %dMbs %s-duplex operation.\n",
				   (option & 0x20 ? 100 : 10),
				   (option & 0x10 ? "full" : "half"));
			mdio_write(dev, eeprom[6] & 0x1f, MII_BMCR,
					   ((option & 0x20) ? 0x2000 : 0) |	/* 100mbps? */
					   ((option & 0x10) ? 0x0100 : 0));	/* Full duplex? */
		}

		/* Perform a system self-test. */
		self_test_results = (s32*) ((((long) tx_ring_space) + 15) & ~0xf);
		self_test_results[0] = 0;
		self_test_results[1] = -1;
		outl(tx_ring_dma | PortSelfTest, ioaddr + SCBPort);
		do {
			udelay(10);
		} while (self_test_results[1] == -1  &&  --boguscnt >= 0);

		if (boguscnt < 0) {		/* Test optimized out. */
			printk(KERN_ERR "Self test failed, status %8.8x:\n"
				   KERN_ERR " Failure to initialize the i82557.\n"
				   KERN_ERR " Verify that the card is a bus-master"
				   " capable slot.\n",
				   self_test_results[1]);
		} else
			printk(KERN_INFO "  General self-test: %s.\n"
				   KERN_INFO "  Serial sub-system self-test: %s.\n"
				   KERN_INFO "  Internal registers self-test: %s.\n"
				   KERN_INFO "  ROM checksum self-test: %s (%#8.8x).\n",
				   self_test_results[1] & 0x1000 ? "failed" : "passed",
				   self_test_results[1] & 0x0020 ? "failed" : "passed",
				   self_test_results[1] & 0x0008 ? "failed" : "passed",
				   self_test_results[1] & 0x0004 ? "failed" : "passed",
				   self_test_results[0]);
	}
#endif  /* kernel_bloat */
	outl(PortReset, ioaddr + SCBPort);
	inl(ioaddr + SCBPort);
	udelay(10);

	/* Return the chip to its original power state. */
	pci_set_power_state(pdev, acpi_idle_state);

	pci_set_drvdata (pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	dev->irq = pdev->irq;

	sp = dev->priv;
	sp->pdev = pdev;
	sp->msg_enable = DEBUG;
	sp->acpi_pwr = acpi_idle_state;
	sp->tx_ring = tx_ring_space;
	sp->tx_ring_dma = tx_ring_dma;
	sp->lstats = (struct speedo_stats *)(sp->tx_ring + TX_RING_SIZE);
	sp->lstats_dma = TX_RING_ELEM_DMA(sp, TX_RING_SIZE);
	init_timer(&sp->timer); /* used in ioctl() */
	spin_lock_init(&sp->lock);
	sp->mii_if.full_duplex = option >= 0 && (option & 0x10) ? 1 : 0;
	if (card_idx >= 0) {
		if (full_duplex[card_idx] >= 0)
			sp->mii_if.full_duplex = full_duplex[card_idx];
	}
	sp->default_port = option >= 0 ? (option & 0x0f) : 0;

	sp->phy[0] = eeprom[6];
	sp->phy[1] = eeprom[7];

	sp->mii_if.phy_id = eeprom[6] & 0x1f;
	sp->mii_if.phy_id_mask = 0x1f;
	sp->mii_if.reg_num_mask = 0x1f;
	sp->mii_if.dev = dev;
	sp->mii_if.mdio_read = mdio_read;
	sp->mii_if.mdio_write = mdio_write;

	sp->rx_bug = (eeprom[3] & 0x03) == 3 ? 0 : 1;
	if (((pdev->device > 0x1030 && (pdev->device < 0x103F)))
	    || (pdev->device == 0x2449) || (pdev->device == 0x2459)
	    || (pdev->device == 0x245D)) {
		sp->chip_id = 1;
	}

	if (sp->rx_bug)
		printk(KERN_INFO "  Receiver lock-up workaround activated.\n");

	/* The Speedo-specific entries in the device structure. */
	dev->open = &speedo_open;
	dev->hard_start_xmit = &speedo_start_xmit;
	netif_set_tx_timeout(dev, &speedo_tx_timeout, TX_TIMEOUT);
	dev->stop = &speedo_close;
	dev->get_stats = &speedo_get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &speedo_ioctl;

	if (register_netdevice(dev))
		goto err_free_unlock;
	rtnl_unlock();

	return 0;

 err_free_unlock:
	rtnl_unlock();
	kfree(dev);
	return -1;
}
static void do_slow_command(struct net_device *dev, int cmd)
{
	long cmd_ioaddr = dev->base_addr + SCBCmd;
	int wait = 0;
	do
		if (inb(cmd_ioaddr) == 0) break;
	while(++wait <= 200);
	if (wait > 100)
		printk(KERN_ERR "Command %4.4x never accepted (%d polls)!\n",
		       inb(cmd_ioaddr), wait);

	outb(cmd, cmd_ioaddr);

	for (wait = 0; wait <= 100; wait++)
		if (inb(cmd_ioaddr) == 0) return;
	for (; wait <= 20000; wait++)
		if (inb(cmd_ioaddr) == 0) return;
		else udelay(1);
	printk(KERN_ERR "Command %4.4x was not accepted after %d polls!"
	       "  Current status %8.8x.\n",
	       cmd, wait, inl(dev->base_addr + SCBStatus));
}
/* Serial EEPROM section.
   A "bit" grungy, but we work our way through bit-by-bit :->. */
/*  EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK	0x01	/* EEPROM shift clock. */
#define EE_CS		0x02	/* EEPROM chip select. */
#define EE_DATA_WRITE	0x04	/* EEPROM chip data in. */
#define EE_DATA_READ	0x08	/* EEPROM chip data out. */
#define EE_ENB		(0x4800 | EE_CS)
#define EE_WRITE_0	0x4802
#define EE_WRITE_1	0x4806
#define EE_OFFSET	SCBeeprom

/* The fixes for the code were kindly provided by Dragan Stancevic
   <visitor@valinux.com> to strictly follow Intel specifications of EEPROM
   access timing.
   The publicly available sheet 64486302 (sec. 3.1) specifies 1us access
   interval for serial EEPROM.  However, it looks like there is an
   additional requirement dictating larger udelay's in the code below.
   2000/05/24  SAW */
static int __devinit do_eeprom_cmd(long ioaddr, int cmd, int cmd_len)
{
	unsigned retval = 0;
	long ee_addr = ioaddr + SCBeeprom;

	io_outw(EE_ENB, ee_addr); udelay(2);
	io_outw(EE_ENB | EE_SHIFT_CLK, ee_addr); udelay(2);

	/* Shift the command bits out. */
	do {
		short dataval = (cmd & (1 << cmd_len)) ? EE_WRITE_1 : EE_WRITE_0;
		io_outw(dataval, ee_addr); udelay(2);
		io_outw(dataval | EE_SHIFT_CLK, ee_addr); udelay(2);
		retval = (retval << 1) | ((io_inw(ee_addr) & EE_DATA_READ) ? 1 : 0);
	} while (--cmd_len >= 0);
	io_outw(EE_ENB, ee_addr); udelay(2);

	/* Terminate the EEPROM access. */
	io_outw(EE_ENB & ~EE_CS, ee_addr);
	return retval;
}
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	long ioaddr = dev->base_addr;
	int val, boguscnt = 64*10;	/* <64 usec. to complete, typ 27 ticks */
	outl(0x08000000 | (location<<16) | (phy_id<<21), ioaddr + SCBCtrlMDI);
	do {
		val = inl(ioaddr + SCBCtrlMDI);
		if (--boguscnt < 0) {
			printk(KERN_ERR " mdio_read() timed out with val = %8.8x.\n", val);
			break;
		}
	} while (! (val & 0x10000000));
	return val & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	long ioaddr = dev->base_addr;
	int val, boguscnt = 64*10;	/* <64 usec. to complete, typ 27 ticks */
	outl(0x04000000 | (location<<16) | (phy_id<<21) | value,
		 ioaddr + SCBCtrlMDI);
	do {
		val = inl(ioaddr + SCBCtrlMDI);
		if (--boguscnt < 0) {
			printk(KERN_ERR" mdio_write() timed out with val = %8.8x.\n", val);
			break;
		}
	} while (! (val & 0x10000000));
}
static int
speedo_open(struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int retval;

	if (netif_msg_ifup(sp))
		printk(KERN_DEBUG "%s: speedo_open() irq %d.\n", dev->name, dev->irq);

	pci_set_power_state(sp->pdev, 0);

	/* Set up the Tx queue early.. */
	sp->cur_tx = 0;
	sp->dirty_tx = 0;
	sp->last_cmd = 0;
	sp->tx_full = 0;
	sp->in_interrupt = 0;

	/* .. we can safely take handler calls during init. */
	retval = request_irq(dev->irq, &speedo_interrupt, SA_SHIRQ, dev->name, dev);
	if (retval) {
		return retval;
	}

	dev->if_port = sp->default_port;

#ifdef oh_no_you_dont_unless_you_honour_the_options_passed_in_to_us
	/* Retrigger negotiation to reset previous errors. */
	if ((sp->phy[0] & 0x8000) == 0) {
		int phy_addr = sp->phy[0] & 0x1f ;
		/* Use 0x3300 for restarting NWay, other values to force xcvr:
		   0x0000 10-HD
		   0x0100 10-FD
		   0x2000 100-HD
		   0x2100 100-FD
		*/
#ifdef honor_default_port
		mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
#else
		mdio_write(dev, phy_addr, MII_BMCR, 0x3300);
#endif
	}
#endif

	speedo_init_rx_ring(dev);

	/* Fire up the hardware. */
	outw(SCBMaskAll, ioaddr + SCBCmd);
	speedo_resume(dev);

	netdevice_start(dev);
	netif_start_queue(dev);

	/* Setup the chip and configure the multicast list. */
	sp->mc_setup_head = NULL;
	sp->mc_setup_tail = NULL;
	sp->flow_ctrl = sp->partner = 0;
	sp->rx_mode = -1;			/* Invalid -> always reset the mode. */
	set_rx_mode(dev);
	if ((sp->phy[0] & 0x8000) == 0)
		sp->mii_if.advertising = mdio_read(dev, sp->phy[0] & 0x1f, MII_ADVERTISE);

	mii_check_link(&sp->mii_if);

	if (netif_msg_ifup(sp)) {
		printk(KERN_DEBUG "%s: Done speedo_open(), status %8.8x.\n",
			   dev->name, inw(ioaddr + SCBStatus));
	}

	/* Set the timer.  The timer serves a dual purpose:
	   1) to monitor the media interface (e.g. link beat) and perhaps switch
	   to an alternate media type
	   2) to monitor Rx activity, and restart the Rx process if the receiver
	   hangs. */
	sp->timer.expires = RUN_AT((24*HZ)/10);	/* 2.4 sec. */
	sp->timer.data = (unsigned long)dev;
	sp->timer.function = &speedo_timer;	/* timer handler */
	add_timer(&sp->timer);

	/* No need to wait for the command unit to accept here. */
	if ((sp->phy[0] & 0x8000) == 0)
		mdio_read(dev, sp->phy[0] & 0x1f, MII_BMCR);

	return 0;
}
/* Start the chip hardware after a full reset. */
static void speedo_resume(struct net_device *dev)
{
	struct speedo_private *sp = dev->priv;
	long ioaddr = dev->base_addr;

	/* Start with a Tx threshold of 256 (0x..20.... 8 byte units). */
	sp->tx_threshold = 0x01208000;

	/* Set the segment registers to '0'. */
	if (wait_for_cmd_done(dev) != 0) {
		outl(PortPartialReset, ioaddr + SCBPort);
		udelay(10);
	}

	outl(0, ioaddr + SCBPointer);
	inl(ioaddr + SCBPointer);	/* Flush to PCI. */
	udelay(10);			/* Bogus, but it avoids the bug. */

	/* Note: these next two operations can take a while. */
	do_slow_command(dev, RxAddrLoad);
	do_slow_command(dev, CUCmdBase);

	/* Load the statistics block and rx ring addresses. */
	outl(sp->lstats_dma, ioaddr + SCBPointer);
	inl(ioaddr + SCBPointer);	/* Flush to PCI */

	outb(CUStatsAddr, ioaddr + SCBCmd);
	sp->lstats->done_marker = 0;
	wait_for_cmd_done(dev);

	if (sp->rx_ringp[sp->cur_rx % RX_RING_SIZE] == NULL) {
		if (netif_msg_rx_err(sp))
			printk(KERN_DEBUG "%s: NULL cur_rx in speedo_resume().\n",
					dev->name);
	} else {
		outl(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
			 ioaddr + SCBPointer);
		inl(ioaddr + SCBPointer);	/* Flush to PCI */
	}

	/* Note: RxStart should complete instantly. */
	do_slow_command(dev, RxStart);
	do_slow_command(dev, CUDumpStats);

	/* Fill the first command with our physical address. */
	{
		struct descriptor *ias_cmd;

		ias_cmd =
			(struct descriptor *)&sp->tx_ring[sp->cur_tx++ % TX_RING_SIZE];
		/* Avoid a bug(?!) here by marking the command already completed. */
		ias_cmd->cmd_status = cpu_to_le32((CmdSuspend | CmdIASetup) | 0xa000);
		ias_cmd->link =
			cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
		memcpy(ias_cmd->params, dev->dev_addr, 6);
		if (sp->last_cmd)
			clear_suspend(sp->last_cmd);
		sp->last_cmd = ias_cmd;
	}

	/* Start the chip's Tx process and unmask interrupts. */
	outl(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
		 ioaddr + SCBPointer);
	/* We are not ACK-ing FCP and ER in the interrupt handler yet so they should
	   remain masked --Dragan */
	outw(CUStart | SCBMaskEarlyRx | SCBMaskFlowCtl, ioaddr + SCBCmd);
}
/*
 * Sometimes the receiver stops making progress.  This routine knows how to
 * get it going again, without losing packets or being otherwise nasty like
 * a chip reset would be.  Previously the driver had a whole sequence
 * of if RxSuspended, if it's no buffers do one thing, if it's no resources,
 * do another, etc.  But those things don't really matter.  Separate logic
 * in the ISR provides for allocating buffers--the other half of operation
 * is just making sure the receiver is active.  speedo_rx_soft_reset does that.
 * This problem with the old, more involved algorithm shows up under
 * ping floods on the order of 60K packets/second on a 100Mbps fdx network.
 */
static void
speedo_rx_soft_reset(struct net_device *dev)
{
	struct speedo_private *sp = dev->priv;
	struct RxFD *rfd;
	long ioaddr;

	ioaddr = dev->base_addr;
	if (wait_for_cmd_done(dev) != 0) {
		printk("%s: previous command stalled\n", dev->name);
		return;
	}
	/*
	 * Put the hardware into a known state.
	 */
	outb(RxAbort, ioaddr + SCBCmd);

	rfd = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];

	rfd->rx_buf_addr = 0xffffffff;

	if (wait_for_cmd_done(dev) != 0) {
		printk("%s: RxAbort command stalled\n", dev->name);
		return;
	}
	outl(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
		ioaddr + SCBPointer);
	outb(RxStart, ioaddr + SCBCmd);
}
/* Media monitoring and control. */
static void speedo_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int phy_num = sp->phy[0] & 0x1f;

	/* We have MII and lost link beat. */
	if ((sp->phy[0] & 0x8000) == 0) {
		int partner = mdio_read(dev, phy_num, MII_LPA);
		if (partner != sp->partner) {
			int flow_ctrl = sp->mii_if.advertising & partner & 0x0400 ? 1 : 0;
			if (netif_msg_link(sp)) {
				printk(KERN_DEBUG "%s: Link status change.\n", dev->name);
				printk(KERN_DEBUG "%s: Old partner %x, new %x, adv %x.\n",
					   dev->name, sp->partner, partner, sp->mii_if.advertising);
			}
			sp->partner = partner;
			if (flow_ctrl != sp->flow_ctrl) {
				sp->flow_ctrl = flow_ctrl;
				sp->rx_mode = -1;	/* Trigger a reload. */
			}
		}
	}
	mii_check_link(&sp->mii_if);
	if (netif_msg_timer(sp)) {
		printk(KERN_DEBUG "%s: Media control tick, status %4.4x.\n",
			   dev->name, inw(ioaddr + SCBStatus));
	}
	if (sp->rx_mode < 0  ||
		(sp->rx_bug  &&  jiffies - sp->last_rx_time > 2*HZ)) {
		/* We haven't received a packet in a Long Time.  We might have been
		   bitten by the receiver hang bug.  This can be cleared by sending
		   a set multicast list command. */
		if (netif_msg_timer(sp))
			printk(KERN_DEBUG "%s: Sending a multicast list set command"
				   " from a timer routine,"
				   " m=%d, j=%ld, l=%ld.\n",
				   dev->name, sp->rx_mode, jiffies, sp->last_rx_time);
		set_rx_mode(dev);
	}
	/* We must continue to monitor the media. */
	sp->timer.expires = RUN_AT(2*HZ);	/* 2.0 sec. */
	add_timer(&sp->timer);
}
static void speedo_show_state(struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	int i;

	if (netif_msg_pktdata(sp)) {
		printk(KERN_DEBUG "%s: Tx ring dump,  Tx queue %u / %u:\n",
		    dev->name, sp->cur_tx, sp->dirty_tx);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_DEBUG "%s:  %c%c%2d %8.8x.\n", dev->name,
			    i == sp->dirty_tx % TX_RING_SIZE ? '*' : ' ',
			    i == sp->cur_tx % TX_RING_SIZE ? '=' : ' ',
			    i, sp->tx_ring[i].status);

		printk(KERN_DEBUG "%s: Printing Rx ring"
		    " (next to receive into %u, dirty index %u).\n",
		    dev->name, sp->cur_rx, sp->dirty_rx);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(KERN_DEBUG "%s: %c%c%c%2d %8.8x.\n", dev->name,
			    sp->rx_ringp[i] == sp->last_rxf ? 'l' : ' ',
			    i == sp->dirty_rx % RX_RING_SIZE ? '*' : ' ',
			    i == sp->cur_rx % RX_RING_SIZE ? '=' : ' ',
			    i, (sp->rx_ringp[i] != NULL) ?
			    (unsigned)sp->rx_ringp[i]->status : 0);
	}

#if 0
	{
		long ioaddr = dev->base_addr;
		int phy_num = sp->phy[0] & 0x1f;
		for (i = 0; i < 16; i++) {
			/* FIXME: what does it mean?  --SAW */
			if (i == 6) i = 21;
			printk(KERN_DEBUG "%s:  PHY index %d register %d is %4.4x.\n",
				   dev->name, phy_num, i, mdio_read(dev, phy_num, i));
		}
	}
#endif
}
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void
speedo_init_rx_ring(struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	struct RxFD *rxf, *last_rxf = NULL;
	dma_addr_t last_rxf_dma = 0 /* to shut up the compiler */;
	int i;

	sp->cur_rx = 0;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
		/* XXX: do we really want to call this before the NULL check? --hch */
		rx_align(skb);			/* Align IP on 16 byte boundary */
		sp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;			/* OK.  Just initially short of Rx bufs. */
		skb->dev = dev;			/* Mark as being used by this device. */
		rxf = (struct RxFD *)skb->tail;
		sp->rx_ringp[i] = rxf;
		sp->rx_ring_dma[i] =
			pci_map_single(sp->pdev, rxf,
					PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_BIDIRECTIONAL);
		skb_reserve(skb, sizeof(struct RxFD));
		if (last_rxf) {
			last_rxf->link = cpu_to_le32(sp->rx_ring_dma[i]);
			pci_dma_sync_single(sp->pdev, last_rxf_dma,
					sizeof(struct RxFD), PCI_DMA_TODEVICE);
		}
		last_rxf = rxf;
		last_rxf_dma = sp->rx_ring_dma[i];
		rxf->status = cpu_to_le32(0x00000001);	/* '1' is flag value only. */
		rxf->link = 0;			/* None yet. */
		/* This field unused by i82557. */
		rxf->rx_buf_addr = 0xffffffff;
		rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
		pci_dma_sync_single(sp->pdev, sp->rx_ring_dma[i],
				sizeof(struct RxFD), PCI_DMA_TODEVICE);
	}
	sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
	/* Mark the last entry as end-of-list. */
	last_rxf->status = cpu_to_le32(0xC0000002);	/* '2' is flag value only. */
	pci_dma_sync_single(sp->pdev, sp->rx_ring_dma[RX_RING_SIZE-1],
			sizeof(struct RxFD), PCI_DMA_TODEVICE);
	sp->last_rxf = last_rxf;
	sp->last_rxf_dma = last_rxf_dma;
}
static void speedo_purge_tx(struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	int entry;

	while ((int)(sp->cur_tx - sp->dirty_tx) > 0) {
		entry = sp->dirty_tx % TX_RING_SIZE;
		if (sp->tx_skbuff[entry]) {
			sp->stats.tx_errors++;
			pci_unmap_single(sp->pdev,
					le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
					sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(sp->tx_skbuff[entry]);
			sp->tx_skbuff[entry] = 0;
		}
		sp->dirty_tx++;
	}
	while (sp->mc_setup_head != NULL) {
		struct speedo_mc_block *t;
		if (netif_msg_tx_err(sp))
			printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
		pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
				sp->mc_setup_head->len, PCI_DMA_TODEVICE);
		t = sp->mc_setup_head->next;
		kfree(sp->mc_setup_head);
		sp->mc_setup_head = t;
	}
	sp->mc_setup_tail = NULL;
	sp->tx_full = 0;
	netif_wake_queue(dev);
}
static void reset_mii(struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;

	/* Reset the MII transceiver, suggested by Fred Young @ scalable.com. */
	if ((sp->phy[0] & 0x8000) == 0) {
		int phy_addr = sp->phy[0] & 0x1f;
		int advertising = mdio_read(dev, phy_addr, MII_ADVERTISE);
		int mii_bmcr = mdio_read(dev, phy_addr, MII_BMCR);
		mdio_write(dev, phy_addr, MII_BMCR, 0x0400);
		mdio_write(dev, phy_addr, MII_BMSR, 0x0000);
		mdio_write(dev, phy_addr, MII_ADVERTISE, 0x0000);
		mdio_write(dev, phy_addr, MII_BMCR, 0x8000);
#ifdef honor_default_port
		mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
#else
		mdio_read(dev, phy_addr, MII_BMCR);
		mdio_write(dev, phy_addr, MII_BMCR, mii_bmcr);
		mdio_write(dev, phy_addr, MII_ADVERTISE, advertising);
#endif
	}
}
static void speedo_tx_timeout(struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int status = inw(ioaddr + SCBStatus);
	unsigned long flags;

	if (netif_msg_tx_err(sp)) {
		printk(KERN_WARNING "%s: Transmit timed out: status %4.4x "
		   " %4.4x at %d/%d command %8.8x.\n",
		   dev->name, status, inw(ioaddr + SCBCmd),
		   sp->dirty_tx, sp->cur_tx,
		   sp->tx_ring[sp->dirty_tx % TX_RING_SIZE].status);
	}
	speedo_show_state(dev);
#if 0
	if ((status & 0x00C0) != 0x0080
		&&  (status & 0x003C) == 0x0010) {
		/* Only the command unit has stopped. */
		printk(KERN_WARNING "%s: Trying to restart the transmitter...\n",
			   dev->name);
		outl(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
			 ioaddr + SCBPointer);
		outw(CUStart, ioaddr + SCBCmd);
		reset_mii(dev);
	} else {
#else
	{
#endif
		del_timer_sync(&sp->timer);
		/* Reset the Tx and Rx units. */
		outl(PortReset, ioaddr + SCBPort);
		/* We may get spurious interrupts here.  But I don't think that they
		   may do much harm.  1999/12/09 SAW */
		udelay(10);
		/* Disable interrupts. */
		outw(SCBMaskAll, ioaddr + SCBCmd);
		synchronize_irq(dev->irq);
		speedo_tx_buffer_gc(dev);
		/* Free as much as possible.
		   It helps to recover from a hang because of out-of-memory.
		   It also simplifies speedo_resume() in case TX ring is full or
		   close-to-be full. */
		speedo_purge_tx(dev);
		speedo_refill_rx_buffers(dev, 1);
		spin_lock_irqsave(&sp->lock, flags);
		speedo_resume(dev);
		sp->rx_mode = -1;
		dev->trans_start = jiffies;
		spin_unlock_irqrestore(&sp->lock, flags);
		set_rx_mode(dev); /* it takes the spinlock itself --SAW */
		/* Reset MII transceiver.  Do it before starting the timer to serialize
		   mdio_xxx operations.  Yes, it's paranoia :-)  2000/05/09 SAW */
		reset_mii(dev);
		sp->timer.expires = RUN_AT(2*HZ);
		add_timer(&sp->timer);
	}
	return;
}
static int
speedo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int entry;

	/* Prevent interrupts from changing the Tx ring from underneath us. */
	unsigned long flags;

	spin_lock_irqsave(&sp->lock, flags);

	/* Check if there is enough space. */
	if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
		printk(KERN_ERR "%s: incorrect tbusy state, fixed.\n", dev->name);
		netif_stop_queue(dev);
		sp->tx_full = 1;
		spin_unlock_irqrestore(&sp->lock, flags);
		return 1;
	}

	/* Calculate the Tx descriptor entry. */
	entry = sp->cur_tx++ % TX_RING_SIZE;

	sp->tx_skbuff[entry] = skb;
	sp->tx_ring[entry].status =
		cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
	if (!(entry & ((TX_RING_SIZE>>2)-1)))
		sp->tx_ring[entry].status |= cpu_to_le32(CmdIntr);
	sp->tx_ring[entry].link =
		cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
	sp->tx_ring[entry].tx_desc_addr =
		cpu_to_le32(TX_RING_ELEM_DMA(sp, entry) + TX_DESCR_BUF_OFFSET);
	/* The data region is always in one buffer descriptor. */
	sp->tx_ring[entry].count = cpu_to_le32(sp->tx_threshold);
	sp->tx_ring[entry].tx_buf_addr0 =
		cpu_to_le32(pci_map_single(sp->pdev, skb->data,
					   skb->len, PCI_DMA_TODEVICE));
	sp->tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb->len);

	/* workaround for hardware bug on 10 mbit half duplex */

	if ((sp->partner == 0) && (sp->chip_id == 1)) {
		wait_for_cmd_done(dev);
		outb(0 , ioaddr + SCBCmd);
		udelay(1);
	}

	/* Trigger the command unit resume. */
	wait_for_cmd_done(dev);
	clear_suspend(sp->last_cmd);
	/* We want the time window between clearing suspend flag on the previous
	   command and resuming CU to be as small as possible.
	   Interrupts in between are very undesired.  --SAW */
	outb(CUResume, ioaddr + SCBCmd);
	sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];

	/* Leave room for set_rx_mode(). If no more space remains than is
	   reserved for the multicast filter, mark the ring as full. */
	if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
		netif_stop_queue(dev);
		sp->tx_full = 1;
	}

	spin_unlock_irqrestore(&sp->lock, flags);

	dev->trans_start = jiffies;

	return 0;
}
static void speedo_tx_buffer_gc(struct net_device *dev)
{
	unsigned int dirty_tx;
	struct speedo_private *sp = (struct speedo_private *)dev->priv;

	dirty_tx = sp->dirty_tx;
	while ((int)(sp->cur_tx - dirty_tx) > 0) {
		int entry = dirty_tx % TX_RING_SIZE;
		int status = le32_to_cpu(sp->tx_ring[entry].status);

		if (netif_msg_tx_done(sp))
			printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n",
				   entry, status);
		if ((status & StatusComplete) == 0)
			break;			/* It still hasn't been processed. */
		if (status & TxUnderrun)
			if (sp->tx_threshold < 0x01e08000) {
				if (netif_msg_tx_err(sp))
					printk(KERN_DEBUG "%s: TX underrun, threshold adjusted.\n",
						   dev->name);
				sp->tx_threshold += 0x00040000;
			}
		/* Free the original skb. */
		if (sp->tx_skbuff[entry]) {
			sp->stats.tx_packets++;	/* Count only user packets. */
			sp->stats.tx_bytes += sp->tx_skbuff[entry]->len;
			pci_unmap_single(sp->pdev,
					le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
					sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(sp->tx_skbuff[entry]);
			sp->tx_skbuff[entry] = 0;
		}
		dirty_tx++;
	}

	if (netif_msg_tx_err(sp) && (int)(sp->cur_tx - dirty_tx) > TX_RING_SIZE) {
		printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d,"
				" full=%d.\n",
				dirty_tx, sp->cur_tx, sp->tx_full);
		dirty_tx += TX_RING_SIZE;
	}

	while (sp->mc_setup_head != NULL
		   && (int)(dirty_tx - sp->mc_setup_head->tx - 1) > 0) {
		struct speedo_mc_block *t;
		if (netif_msg_tx_err(sp))
			printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
		pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
				sp->mc_setup_head->len, PCI_DMA_TODEVICE);
		t = sp->mc_setup_head->next;
		kfree(sp->mc_setup_head);
		sp->mc_setup_head = t;
	}
	if (sp->mc_setup_head == NULL)
		sp->mc_setup_tail = NULL;

	sp->dirty_tx = dirty_tx;
}
1577 /* The interrupt handler does all of the Rx thread work and cleans up
1578 after the Tx thread. */
1579 static irqreturn_t speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1581 struct net_device *dev = (struct net_device *)dev_instance;
1582 struct speedo_private *sp;
1583 long ioaddr, boguscnt = max_interrupt_work;
1584 unsigned short status;
1585 unsigned int handled = 0;
1587 ioaddr = dev->base_addr;
1588 sp = (struct speedo_private *)dev->priv;
1590 #ifndef final_version
1591 /* A lock to prevent simultaneous entry on SMP machines. */
1592 if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
1593 printk(KERN_ERR "%s: SMP simultaneous entry of an interrupt handler.\n",
1594 dev->name);
1595 sp->in_interrupt = 0; /* Avoid halting machine. */
1596 return IRQ_NONE;
1597 }
1598 #endif
1600 do {
1601 status = inw(ioaddr + SCBStatus);
1602 /* Acknowledge all of the current interrupt sources ASAP. */
1603 /* Will change from 0xfc00 to 0xff00 when we start handling
1604 FCP and ER interrupts --Dragan */
1605 outw(status & 0xfc00, ioaddr + SCBStatus);
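/* The upper byte of SCBStatus holds the STAT/ACK bits; writing a bit back
   acknowledges that interrupt source. 0xfc00 covers CX/TNO, FR, CNA, RNR,
   MDI and SWI but, per the note above, deliberately leaves FCP and ER
   unacknowledged. */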
1607 if (netif_msg_intr(sp))
1608 printk(KERN_DEBUG "%s: interrupt status=%#4.4x.\n",
1609 dev->name, status);
1611 if ((status & 0xfc00) == 0)
1612 break;
1613 handled = 1;
1616 if ((status & 0x5000) || /* Packet received, or Rx error. */
1617 (sp->rx_ring_state&(RrNoMem|RrPostponed)) == RrPostponed)
1618 /* Need to gather the postponed packet. */
1619 speedo_rx(dev);
1621 /* Always check if all rx buffers are allocated. --SAW */
1622 speedo_refill_rx_buffers(dev, 0);
1624 spin_lock(&sp->lock);
1625 /*
1626 * The chip may have suspended reception for various reasons.
1627 * Check for that, and re-prime it should this be the case.
1628 */
1629 switch ((status >> 2) & 0xf) {
1630 case 0: /* Idle */
1631 break;
1632 case 1: /* Suspended */
1633 case 2: /* No resources (RxFDs) */
1634 case 9: /* Suspended with no more RBDs */
1635 case 10: /* No resources due to no RBDs */
1636 case 12: /* Ready with no RBDs */
1637 speedo_rx_soft_reset(dev);
1638 break;
1639 case 3: case 5: case 6: case 7: case 8:
1640 case 11: case 13: case 14: case 15:
1641 /* these are all reserved values */
1642 break;
1643 }
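/* (status >> 2) & 0xf is the Receive Unit Status field of the SCB. The
   codes handled above (suspended, or out of RxFDs/RBDs) all mean the RU
   has stopped, so the receiver is re-primed with a soft reset. */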
1646 /* User interrupt, Command/Tx unit interrupt or CU not active. */
1647 if (status & 0xA400) {
1648 speedo_tx_buffer_gc(dev);
1649 if (sp->tx_full
1650 && (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL) {
1651 /* The ring is no longer full. */
1652 sp->tx_full = 0;
1653 netif_wake_queue(dev); /* Attention: under a spinlock. --SAW */
1654 }
1655 }
1657 spin_unlock(&sp->lock);
1659 if (--boguscnt < 0) {
1660 printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
1661 dev->name, status);
1662 /* Clear all interrupt sources. */
1663 /* Will change from 0xfc00 to 0xff00 when we start handling
1664 FCP and ER interrupts --Dragan */
1665 outw(0xfc00, ioaddr + SCBStatus);
1666 break;
1667 }
1668 } while (1);
1670 if (netif_msg_intr(sp))
1671 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1672 dev->name, inw(ioaddr + SCBStatus));
1674 clear_bit(0, (void*)&sp->in_interrupt);
1675 return IRQ_RETVAL(handled);
1676 }
1678 static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry)
1679 {
1680 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1681 struct RxFD *rxf;
1682 struct sk_buff *skb;
1683 /* Get a fresh skbuff to replace the consumed one. */
1684 skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
1685 sp->rx_skbuff[entry] = skb;
1686 if (skb == NULL) {
1687 sp->rx_ringp[entry] = NULL;
1688 return NULL;
1689 }
1690 rx_align(skb); /* Align IP on 16 byte boundary; safe only after the NULL check above. */
1692 rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
1693 sp->rx_ring_dma[entry] =
1694 pci_map_single(sp->pdev, rxf,
1695 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1696 skb->dev = dev;
1697 skb_reserve(skb, sizeof(struct RxFD));
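/* The RxFD header sits at the front of the skb data and the packet
   follows it in the same buffer (simplified Rx mode); the 0xffffffff
   written below marks "no separate receive buffer descriptor". */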
1698 rxf->rx_buf_addr = 0xffffffff;
1699 pci_dma_sync_single(sp->pdev, sp->rx_ring_dma[entry],
1700 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1701 return rxf;
1702 }
1704 static inline void speedo_rx_link(struct net_device *dev, int entry,
1705 struct RxFD *rxf, dma_addr_t rxf_dma)
1706 {
1707 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1708 rxf->status = cpu_to_le32(0xC0000001); /* '1' for driver use only. */
1709 rxf->link = 0; /* None yet. */
1710 rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
1711 sp->last_rxf->link = cpu_to_le32(rxf_dma);
1712 sp->last_rxf->status &= cpu_to_le32(~0xC0000000);
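/* A fresh RxFD is queued with its EL and S bits (0xC0000000) set so the
   receiver stops at it; clearing those bits on the previous descriptor,
   as done above, then lets the chip advance into the new one. */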
1713 pci_dma_sync_single(sp->pdev, sp->last_rxf_dma,
1714 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1715 sp->last_rxf = rxf;
1716 sp->last_rxf_dma = rxf_dma;
1717 }
1719 static int speedo_refill_rx_buf(struct net_device *dev, int force)
1720 {
1721 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1722 int entry;
1723 struct RxFD *rxf;
1725 entry = sp->dirty_rx % RX_RING_SIZE;
1726 if (sp->rx_skbuff[entry] == NULL) {
1727 rxf = speedo_rx_alloc(dev, entry);
1728 if (rxf == NULL) {
1729 unsigned int forw;
1730 int forw_entry;
1731 if (netif_msg_rx_err(sp) || !(sp->rx_ring_state & RrOOMReported)) {
1732 printk(KERN_WARNING "%s: can't fill rx buffer (force %d)!\n",
1733 dev->name, force);
1734 sp->rx_ring_state |= RrOOMReported;
1735 }
1736 speedo_show_state(dev);
1737 if (!force)
1738 return -1; /* Better luck next time! */
1739 /* Borrow an skb from one of next entries. */
1740 for (forw = sp->dirty_rx + 1; forw != sp->cur_rx; forw++)
1741 if (sp->rx_skbuff[forw % RX_RING_SIZE] != NULL)
1742 break;
1743 if (forw == sp->cur_rx)
1744 return -1;
1745 forw_entry = forw % RX_RING_SIZE;
1746 sp->rx_skbuff[entry] = sp->rx_skbuff[forw_entry];
1747 sp->rx_skbuff[forw_entry] = NULL;
1748 rxf = sp->rx_ringp[forw_entry];
1749 sp->rx_ringp[forw_entry] = NULL;
1750 sp->rx_ringp[entry] = rxf;
1751 }
1752 } else {
1753 rxf = sp->rx_ringp[entry];
1754 }
1755 speedo_rx_link(dev, entry, rxf, sp->rx_ring_dma[entry]);
1756 sp->dirty_rx++;
1757 sp->rx_ring_state &= ~(RrNoMem|RrOOMReported); /* Mark the progress. */
1758 return 0;
1759 }
1761 static void speedo_refill_rx_buffers(struct net_device *dev, int force)
1762 {
1763 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1765 /* Refill the RX ring. */
1766 while ((int)(sp->cur_rx - sp->dirty_rx) > 0 &&
1767 speedo_refill_rx_buf(dev, force) != -1);
1768 }
1770 static int
1771 speedo_rx(struct net_device *dev)
1772 {
1773 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1774 int entry = sp->cur_rx % RX_RING_SIZE;
1775 int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;
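/* rx_work_limit is the number of ring slots that still hold buffers:
   cur_rx - dirty_rx slots await refill, so at most RX_RING_SIZE minus
   that many packets can be consumed in this pass. */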
1776 int alloc_ok = 1;
1777 int npkts = 0;
1779 if (netif_msg_intr(sp))
1780 printk(KERN_DEBUG " In speedo_rx().\n");
1781 /* If we own the next entry, it's a new packet. Send it up. */
1782 while (sp->rx_ringp[entry] != NULL) {
1783 int status;
1784 int pkt_len;
1786 pci_dma_sync_single(sp->pdev, sp->rx_ring_dma[entry],
1787 sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1788 status = le32_to_cpu(sp->rx_ringp[entry]->status);
1789 pkt_len = le32_to_cpu(sp->rx_ringp[entry]->count) & 0x3fff;
1791 if (!(status & RxComplete))
1792 break;
1794 if (--rx_work_limit < 0)
1795 break;
1797 /* Check for a rare out-of-memory case: the current buffer is
1798 the last buffer allocated in the RX ring. --SAW */
1799 if (sp->last_rxf == sp->rx_ringp[entry]) {
1800 /* Postpone the packet. It'll be reaped at an interrupt when this
1801 packet is no longer the last packet in the ring. */
1802 if (netif_msg_rx_err(sp))
1803 printk(KERN_DEBUG "%s: RX packet postponed!\n",
1804 dev->name);
1805 sp->rx_ring_state |= RrPostponed;
1806 break;
1807 }
1809 if (netif_msg_rx_status(sp))
1810 printk(KERN_DEBUG " speedo_rx() status %8.8x len %d.\n", status,
1811 pkt_len);
1812 if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) {
1813 if (status & RxErrTooBig)
1814 printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, "
1815 "status %8.8x!\n", dev->name, status);
1816 else if (! (status & RxOK)) {
1817 /* There was a fatal error. This *should* be impossible. */
1818 sp->stats.rx_errors++;
1819 printk(KERN_ERR "%s: Anomalous event in speedo_rx(), "
1820 "status %8.8x.\n",
1821 dev->name, status);
1822 }
1823 } else {
1824 struct sk_buff *skb;
1826 /* Check if the packet is long enough to just accept without
1827 copying to a properly sized skbuff. */
1828 if (pkt_len < rx_copybreak
1829 && (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
1830 skb->dev = dev;
1831 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1832 /* 'skb_put()' points to the start of sk_buff data area. */
1833 pci_dma_sync_single(sp->pdev, sp->rx_ring_dma[entry],
1834 sizeof(struct RxFD) + pkt_len, PCI_DMA_FROMDEVICE);
1836 #if 1 || USE_IP_CSUM
1837 /* Packet is in one chunk -- we can copy + cksum. */
1838 eth_copy_and_sum(skb, sp->rx_skbuff[entry]->tail, pkt_len, 0);
1839 skb_put(skb, pkt_len);
1840 #else
1841 memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->tail,
1842 pkt_len);
1843 #endif
1844 npkts++;
1845 } else {
1846 /* Pass up the already-filled skbuff. */
1847 skb = sp->rx_skbuff[entry];
1848 if (skb == NULL) {
1849 printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
1850 dev->name);
1851 break;
1852 }
1853 sp->rx_skbuff[entry] = NULL;
1854 skb_put(skb, pkt_len);
1855 npkts++;
1856 sp->rx_ringp[entry] = NULL;
1857 pci_unmap_single(sp->pdev, sp->rx_ring_dma[entry],
1858 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1859 }
1860 skb->protocol = eth_type_trans(skb, dev);
1861 netif_rx(skb);
1862 dev->last_rx = jiffies;
1863 sp->stats.rx_packets++;
1864 sp->stats.rx_bytes += pkt_len;
1865 }
1866 entry = (++sp->cur_rx) % RX_RING_SIZE;
1867 sp->rx_ring_state &= ~RrPostponed;
1868 /* Refill the recently taken buffers.
1869 Do it one-by-one to handle traffic bursts better. */
1870 if (alloc_ok && speedo_refill_rx_buf(dev, 0) == -1)
1871 alloc_ok = 0;
1872 }
1874 /* Try hard to refill the recently taken buffers. */
1875 speedo_refill_rx_buffers(dev, 1);
1877 if (npkts)
1878 sp->last_rx_time = jiffies;
1880 return 0;
1881 }
1883 static int
1884 speedo_close(struct net_device *dev)
1885 {
1886 long ioaddr = dev->base_addr;
1887 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1888 int i;
1890 netdevice_stop(dev);
1891 netif_stop_queue(dev);
1893 if (netif_msg_ifdown(sp))
1894 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
1895 dev->name, inw(ioaddr + SCBStatus));
1897 /* Shut off the media monitoring timer. */
1898 del_timer_sync(&sp->timer);
1900 outw(SCBMaskAll, ioaddr + SCBCmd);
1902 /* Shutting down the chip nicely fails to disable flow control. So.. */
1903 outl(PortPartialReset, ioaddr + SCBPort);
1904 inl(ioaddr + SCBPort); /* flush posted write */
1905 /*
1906 * The chip requires a 10 microsecond quiet period. Wait here!
1907 */
1908 udelay(10);
1910 free_irq(dev->irq, dev);
1911 speedo_show_state(dev);
1913 /* Free all the skbuffs in the Rx and Tx queues. */
1914 for (i = 0; i < RX_RING_SIZE; i++) {
1915 struct sk_buff *skb = sp->rx_skbuff[i];
1916 sp->rx_skbuff[i] = 0;
1917 /* Clear the Rx descriptors. */
1918 if (skb) {
1919 pci_unmap_single(sp->pdev,
1920 sp->rx_ring_dma[i],
1921 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1922 dev_kfree_skb(skb);
1923 }
1924 }
1926 for (i = 0; i < TX_RING_SIZE; i++) {
1927 struct sk_buff *skb = sp->tx_skbuff[i];
1928 sp->tx_skbuff[i] = 0;
1929 /* Clear the Tx descriptors. */
1930 if (skb) {
1931 pci_unmap_single(sp->pdev,
1932 le32_to_cpu(sp->tx_ring[i].tx_buf_addr0),
1933 skb->len, PCI_DMA_TODEVICE);
1934 dev_kfree_skb(skb);
1935 }
1936 }
1938 /* Free multicast setting blocks. */
1939 for (i = 0; sp->mc_setup_head != NULL; i++) {
1940 struct speedo_mc_block *t;
1941 t = sp->mc_setup_head->next;
1942 kfree(sp->mc_setup_head);
1943 sp->mc_setup_head = t;
1944 }
1945 sp->mc_setup_tail = NULL;
1946 if (netif_msg_ifdown(sp))
1947 printk(KERN_DEBUG "%s: %d multicast blocks dropped.\n", dev->name, i);
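/* Park the chip in the D2 low-power state while the interface is down;
   the open path presumably restores full power before the device is
   used again. */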
1949 pci_set_power_state(sp->pdev, 2);
1951 return 0;
1952 }
1954 /* The Speedo-3 has an especially awkward and unusable method of getting
1955 statistics out of the chip. It takes an unpredictable length of time
1956 for the dump-stats command to complete. To avoid a busy-wait loop we
1957 update the stats with the previous dump results, and then trigger a
1958 new dump.
1960 Oh, and incoming frames are dropped while executing dump-stats!
1961 */
1962 static struct net_device_stats *
1963 speedo_get_stats(struct net_device *dev)
1964 {
1965 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1966 long ioaddr = dev->base_addr;
1968 /* Update only if the previous dump finished. */
1969 if (sp->lstats->done_marker == le32_to_cpu(0xA007)) {
1970 sp->stats.tx_aborted_errors += le32_to_cpu(sp->lstats->tx_coll16_errs);
1971 sp->stats.tx_window_errors += le32_to_cpu(sp->lstats->tx_late_colls);
1972 sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_underruns);
1973 sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_lost_carrier);
1974 /*sp->stats.tx_deferred += le32_to_cpu(sp->lstats->tx_deferred);*/
1975 sp->stats.collisions += le32_to_cpu(sp->lstats->tx_total_colls);
1976 sp->stats.rx_crc_errors += le32_to_cpu(sp->lstats->rx_crc_errs);
1977 sp->stats.rx_frame_errors += le32_to_cpu(sp->lstats->rx_align_errs);
1978 sp->stats.rx_over_errors += le32_to_cpu(sp->lstats->rx_resource_errs);
1979 sp->stats.rx_fifo_errors += le32_to_cpu(sp->lstats->rx_overrun_errs);
1980 sp->stats.rx_length_errors += le32_to_cpu(sp->lstats->rx_runt_errs);
1981 sp->lstats->done_marker = 0x0000;
1982 if (netif_running(dev)) {
1983 unsigned long flags;
1984 /* Take a spinlock to make wait_for_cmd_done and sending the
1985 command atomic. --SAW */
1986 spin_lock_irqsave(&sp->lock, flags);
1987 wait_for_cmd_done(dev);
1988 outb(CUDumpStats, ioaddr + SCBCmd);
1989 spin_unlock_irqrestore(&sp->lock, flags);
1990 }
1991 }
1992 return &sp->stats;
1993 }
1995 static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
1996 {
1997 u32 ethcmd;
1998 struct speedo_private *sp = dev->priv;
2000 if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
2001 return -EFAULT;
2003 switch (ethcmd) {
2004 /* get driver-specific version/etc. info */
2005 case ETHTOOL_GDRVINFO: {
2006 struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
2007 strncpy(info.driver, "eepro100", sizeof(info.driver)-1);
2008 strncpy(info.version, version, sizeof(info.version)-1);
2009 if (sp && sp->pdev)
2010 strcpy(info.bus_info, sp->pdev->slot_name);
2011 if (copy_to_user(useraddr, &info, sizeof(info)))
2012 return -EFAULT;
2013 return 0;
2014 }
2016 /* get settings */
2017 case ETHTOOL_GSET: {
2018 struct ethtool_cmd ecmd = { ETHTOOL_GSET };
2019 spin_lock_irq(&sp->lock);
2020 mii_ethtool_gset(&sp->mii_if, &ecmd);
2021 spin_unlock_irq(&sp->lock);
2022 if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
2023 return -EFAULT;
2024 return 0;
2025 }
2026 /* set settings */
2027 case ETHTOOL_SSET: {
2028 int r;
2029 struct ethtool_cmd ecmd;
2030 if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
2031 return -EFAULT;
2032 spin_lock_irq(&sp->lock);
2033 r = mii_ethtool_sset(&sp->mii_if, &ecmd);
2034 spin_unlock_irq(&sp->lock);
2035 return r;
2036 }
2037 /* restart autonegotiation */
2038 case ETHTOOL_NWAY_RST: {
2039 return mii_nway_restart(&sp->mii_if);
2040 }
2041 /* get link status */
2042 case ETHTOOL_GLINK: {
2043 struct ethtool_value edata = {ETHTOOL_GLINK};
2044 edata.data = mii_link_ok(&sp->mii_if);
2045 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2046 return -EFAULT;
2047 return 0;
2048 }
2049 /* get message-level */
2050 case ETHTOOL_GMSGLVL: {
2051 struct ethtool_value edata = {ETHTOOL_GMSGLVL};
2052 edata.data = sp->msg_enable;
2053 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2054 return -EFAULT;
2055 return 0;
2056 }
2057 /* set message-level */
2058 case ETHTOOL_SMSGLVL: {
2059 struct ethtool_value edata;
2060 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2061 return -EFAULT;
2062 sp->msg_enable = edata.data;
2063 return 0;
2064 }
2066 }
2068 return -EOPNOTSUPP;
2069 }
2071 static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2072 {
2073 struct speedo_private *sp = (struct speedo_private *)dev->priv;
2074 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&rq->ifr_data;
2075 int phy = sp->phy[0] & 0x1f;
2076 int saved_acpi;
2077 int t;
2079 switch(cmd) {
2080 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
2081 data->phy_id = phy;
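/* No break: fall through and read the requested MII register as well. */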
2083 case SIOCGMIIREG: /* Read MII PHY register. */
2084 /* FIXME: these operations need to be serialized with MDIO
2085 access from the timeout handler.
2086 They are currently serialized only with MDIO access from the
2087 timer routine. 2000/05/09 SAW */
2088 saved_acpi = pci_set_power_state(sp->pdev, 0);
2089 t = del_timer_sync(&sp->timer);
2090 data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
2091 if (t)
2092 add_timer(&sp->timer); /* may be set to the past --SAW */
2093 pci_set_power_state(sp->pdev, saved_acpi);
2094 return 0;
2096 case SIOCSMIIREG: /* Write MII PHY register. */
2097 if (!capable(CAP_NET_ADMIN))
2098 return -EPERM;
2099 saved_acpi = pci_set_power_state(sp->pdev, 0);
2100 t = del_timer_sync(&sp->timer);
2101 mdio_write(dev, data->phy_id, data->reg_num, data->val_in);
2102 if (t)
2103 add_timer(&sp->timer); /* may be set to the past --SAW */
2104 pci_set_power_state(sp->pdev, saved_acpi);
2105 return 0;
2106 case SIOCETHTOOL:
2107 return netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
2108 default:
2109 return -EOPNOTSUPP;
2110 }
2111 }
2113 /* Set or clear the multicast filter for this adaptor.
2114 This is very ugly with Intel chips -- we usually have to execute an
2115 entire configuration command, plus process a multicast command.
2116 This is complicated. We must put a large configuration command and
2117 an arbitrarily-sized multicast command in the transmit list.
2118 To minimize the disruption -- the previous command might have already
2119 loaded the link -- we convert the current command block, normally a Tx
2120 command, into a no-op and link it to the new command.
2121 */
2122 static void set_rx_mode(struct net_device *dev)
2123 {
2124 struct speedo_private *sp = (struct speedo_private *)dev->priv;
2125 long ioaddr = dev->base_addr;
2126 struct descriptor *last_cmd;
2127 char new_rx_mode;
2128 unsigned long flags;
2129 int entry, i;
2131 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
2132 new_rx_mode = 3;
2133 } else if ((dev->flags & IFF_ALLMULTI) ||
2134 dev->mc_count > multicast_filter_limit) {
2135 new_rx_mode = 1;
2136 } else
2137 new_rx_mode = 0;
2139 if (netif_msg_rx_status(sp))
2140 printk(KERN_DEBUG "%s: set_rx_mode %d -> %d\n", dev->name,
2141 sp->rx_mode, new_rx_mode);
2143 if ((int)(sp->cur_tx - sp->dirty_tx) > TX_RING_SIZE - TX_MULTICAST_SIZE) {
2144 /* The Tx ring is full -- don't add anything! Hope the mode will be
2145 * set again later. */
2146 sp->rx_mode = -1;
2147 return;
2148 }
2150 if (new_rx_mode != sp->rx_mode) {
2151 u8 *config_cmd_data;
2153 spin_lock_irqsave(&sp->lock, flags);
2154 entry = sp->cur_tx++ % TX_RING_SIZE;
2155 last_cmd = sp->last_cmd;
2156 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
2158 sp->tx_skbuff[entry] = 0; /* Redundant. */
2159 sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdConfigure);
2160 sp->tx_ring[entry].link =
2161 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2162 config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr;
2163 /* Construct a full CmdConfig frame. */
2164 memcpy(config_cmd_data, i82558_config_cmd, CONFIG_DATA_SIZE);
2165 config_cmd_data[1] = (txfifo << 4) | rxfifo;
2166 config_cmd_data[4] = rxdmacount;
2167 config_cmd_data[5] = txdmacount + 0x80;
2168 config_cmd_data[15] |= (new_rx_mode & 2) ? 1 : 0;
2169 /* 0x80 doesn't disable FC 0x84 does.
2170 Disable Flow control since we are not ACK-ing any FC interrupts
2171 for now. --Dragan */
2172 config_cmd_data[19] = 0x84;
2173 config_cmd_data[19] |= sp->mii_if.full_duplex ? 0x40 : 0;
2174 config_cmd_data[21] = (new_rx_mode & 1) ? 0x0D : 0x05;
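/* Decoding the mode bits set above: rx_mode bit 1 (promiscuous) sets
   byte 15 bit 0, and rx_mode bit 0 (all-multicast) accounts for the
   extra 0x08 in byte 21. */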
2175 if (sp->phy[0] & 0x8000) { /* Use the AUI port instead. */
2176 config_cmd_data[15] |= 0x80;
2177 config_cmd_data[8] = 0;
2178 }
2179 /* Trigger the command unit resume. */
2180 wait_for_cmd_done(dev);
2181 clear_suspend(last_cmd);
2182 outb(CUResume, ioaddr + SCBCmd);
2183 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2184 netif_stop_queue(dev);
2185 sp->tx_full = 1;
2186 }
2187 spin_unlock_irqrestore(&sp->lock, flags);
2188 }
2190 if (new_rx_mode == 0 && dev->mc_count < 4) {
2191 /* The simple case of 0-3 multicast list entries occurs often, and
2192 fits within one tx_ring[] entry. */
2193 struct dev_mc_list *mclist;
2194 u16 *setup_params, *eaddrs;
2196 spin_lock_irqsave(&sp->lock, flags);
2197 entry = sp->cur_tx++ % TX_RING_SIZE;
2198 last_cmd = sp->last_cmd;
2199 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
2201 sp->tx_skbuff[entry] = 0;
2202 sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdMulticastList);
2203 sp->tx_ring[entry].link =
2204 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2205 sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */
2206 setup_params = (u16 *)&sp->tx_ring[entry].tx_desc_addr;
2207 *setup_params++ = cpu_to_le16(dev->mc_count*6);
2208 /* Fill in the multicast addresses. */
2209 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
2210 i++, mclist = mclist->next) {
2211 eaddrs = (u16 *)mclist->dmi_addr;
2212 *setup_params++ = *eaddrs++;
2213 *setup_params++ = *eaddrs++;
2214 *setup_params++ = *eaddrs++;
2215 }
2217 wait_for_cmd_done(dev);
2218 clear_suspend(last_cmd);
2219 /* Immediately trigger the command unit resume. */
2220 outb(CUResume, ioaddr + SCBCmd);
2222 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2223 netif_stop_queue(dev);
2224 sp->tx_full = 1;
2225 }
2226 spin_unlock_irqrestore(&sp->lock, flags);
2227 } else if (new_rx_mode == 0) {
2228 struct dev_mc_list *mclist;
2229 u16 *setup_params, *eaddrs;
2230 struct speedo_mc_block *mc_blk;
2231 struct descriptor *mc_setup_frm;
2232 int i;
2234 mc_blk = kmalloc(sizeof(*mc_blk) + 2 + multicast_filter_limit*6,
2235 GFP_ATOMIC);
2236 if (mc_blk == NULL) {
2237 printk(KERN_ERR "%s: Failed to allocate a setup frame.\n",
2238 dev->name);
2239 sp->rx_mode = -1; /* We failed, try again. */
2240 return;
2241 }
2242 mc_blk->next = NULL;
2243 mc_blk->len = 2 + multicast_filter_limit*6;
2244 mc_blk->frame_dma =
2245 pci_map_single(sp->pdev, &mc_blk->frame, mc_blk->len,
2246 PCI_DMA_TODEVICE);
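/* The block carries its own bookkeeping (next, tx, len, frame_dma)
   followed by the setup frame proper: a 16-bit address count and up to
   multicast_filter_limit 6-byte addresses, DMA-mapped as one piece. */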
2247 mc_setup_frm = &mc_blk->frame;
2249 /* Fill the setup frame. */
2250 if (netif_msg_ifup(sp))
2251 printk(KERN_DEBUG "%s: Constructing a setup frame at %p.\n",
2252 dev->name, mc_setup_frm);
2253 mc_setup_frm->cmd_status =
2254 cpu_to_le32(CmdSuspend | CmdIntr | CmdMulticastList);
2255 /* Link set below. */
2256 setup_params = (u16 *)&mc_setup_frm->params;
2257 *setup_params++ = cpu_to_le16(dev->mc_count*6);
2258 /* Fill in the multicast addresses. */
2259 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
2260 i++, mclist = mclist->next) {
2261 eaddrs = (u16 *)mclist->dmi_addr;
2262 *setup_params++ = *eaddrs++;
2263 *setup_params++ = *eaddrs++;
2264 *setup_params++ = *eaddrs++;
2265 }
2267 /* Disable interrupts while playing with the Tx Cmd list. */
2268 spin_lock_irqsave(&sp->lock, flags);
2270 if (sp->mc_setup_tail)
2271 sp->mc_setup_tail->next = mc_blk;
2272 else
2273 sp->mc_setup_head = mc_blk;
2274 sp->mc_setup_tail = mc_blk;
2275 mc_blk->tx = sp->cur_tx;
2277 entry = sp->cur_tx++ % TX_RING_SIZE;
2278 last_cmd = sp->last_cmd;
2279 sp->last_cmd = mc_setup_frm;
2281 /* Change the command to a NoOp, pointing to the CmdMulti command. */
2282 sp->tx_skbuff[entry] = 0;
2283 sp->tx_ring[entry].status = cpu_to_le32(CmdNOp);
2284 sp->tx_ring[entry].link = cpu_to_le32(mc_blk->frame_dma);
2286 /* Set the link in the setup frame. */
2287 mc_setup_frm->link =
2288 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2290 pci_dma_sync_single(sp->pdev, mc_blk->frame_dma,
2291 mc_blk->len, PCI_DMA_TODEVICE);
2293 wait_for_cmd_done(dev);
2294 clear_suspend(last_cmd);
2295 /* Immediately trigger the command unit resume. */
2296 outb(CUResume, ioaddr + SCBCmd);
2298 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2299 netif_stop_queue(dev);
2300 sp->tx_full = 1;
2301 }
2302 spin_unlock_irqrestore(&sp->lock, flags);
2304 if (netif_msg_rx_status(sp))
2305 printk(KERN_DEBUG " CmdMCSetup frame with %d addresses queued in entry %d.\n",
2306 dev->mc_count, entry);
2307 }
2309 sp->rx_mode = new_rx_mode;
2310 }
2312 #ifdef CONFIG_PM
2313 static int eepro100_suspend(struct pci_dev *pdev, u32 state)
2314 {
2315 struct net_device *dev = pci_get_drvdata (pdev);
2316 struct speedo_private *sp = (struct speedo_private *)dev->priv;
2317 long ioaddr = dev->base_addr;
2319 pci_save_state(pdev, sp->pm_state);
2321 if (!netif_running(dev))
2322 return 0;
2324 del_timer_sync(&sp->timer);
2326 netif_device_detach(dev);
2327 outl(PortPartialReset, ioaddr + SCBPort);
2329 /* XXX call pci_set_power_state ()? */
2330 return 0;
2331 }
2333 static int eepro100_resume(struct pci_dev *pdev)
2334 {
2335 struct net_device *dev = pci_get_drvdata (pdev);
2336 struct speedo_private *sp = (struct speedo_private *)dev->priv;
2337 long ioaddr = dev->base_addr;
2339 pci_restore_state(pdev, sp->pm_state);
2341 if (!netif_running(dev))
2342 return 0;
2344 /* I'm not at all certain that this part of the code works.
2345 The problems are:
2346 - correct hardware reinitialization;
2347 - correct driver behavior between different steps of the
2348 reinitialization;
2349 - serialization with other driver calls.
2350 2000/03/08 SAW */
2351 outw(SCBMaskAll, ioaddr + SCBCmd);
2352 speedo_resume(dev);
2353 netif_device_attach(dev);
2354 sp->rx_mode = -1;
2355 sp->flow_ctrl = sp->partner = 0;
2356 set_rx_mode(dev);
2357 sp->timer.expires = RUN_AT(2*HZ);
2358 add_timer(&sp->timer);
2359 return 0;
2360 }
2361 #endif /* CONFIG_PM */
2363 static void __devexit eepro100_remove_one (struct pci_dev *pdev)
2364 {
2365 struct net_device *dev = pci_get_drvdata (pdev);
2366 struct speedo_private *sp = (struct speedo_private *)dev->priv;
2368 unregister_netdev(dev);
2370 release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
2371 release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
2373 #ifndef USE_IO
2374 iounmap((char *)dev->base_addr);
2375 #endif
2377 pci_free_consistent(pdev, TX_RING_SIZE * sizeof(struct TxFD)
2378 + sizeof(struct speedo_stats),
2379 sp->tx_ring, sp->tx_ring_dma);
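/* The Tx ring and the chip statistics area were allocated as a single
   consistent block (note the combined size above), so one call frees
   both. */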
2380 pci_disable_device(pdev);
2381 kfree(dev);
2382 }
2384 static struct pci_device_id eepro100_pci_tbl[] __devinitdata = {
2385 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82557,
2386 PCI_ANY_ID, PCI_ANY_ID, },
2387 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82559ER,
2388 PCI_ANY_ID, PCI_ANY_ID, },
2389 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_7,
2390 PCI_ANY_ID, PCI_ANY_ID, },
2391 { PCI_VENDOR_ID_INTEL, 0x1029, PCI_ANY_ID, PCI_ANY_ID, },
2392 { PCI_VENDOR_ID_INTEL, 0x1030, PCI_ANY_ID, PCI_ANY_ID, },
2393 { PCI_VENDOR_ID_INTEL, 0x1031, PCI_ANY_ID, PCI_ANY_ID, },
2394 { PCI_VENDOR_ID_INTEL, 0x1032, PCI_ANY_ID, PCI_ANY_ID, },
2395 { PCI_VENDOR_ID_INTEL, 0x1033, PCI_ANY_ID, PCI_ANY_ID, },
2396 { PCI_VENDOR_ID_INTEL, 0x1034, PCI_ANY_ID, PCI_ANY_ID, },
2397 { PCI_VENDOR_ID_INTEL, 0x1035, PCI_ANY_ID, PCI_ANY_ID, },
2398 { PCI_VENDOR_ID_INTEL, 0x1036, PCI_ANY_ID, PCI_ANY_ID, },
2399 { PCI_VENDOR_ID_INTEL, 0x1037, PCI_ANY_ID, PCI_ANY_ID, },
2400 { PCI_VENDOR_ID_INTEL, 0x1038, PCI_ANY_ID, PCI_ANY_ID, },
2401 { PCI_VENDOR_ID_INTEL, 0x1039, PCI_ANY_ID, PCI_ANY_ID, },
2402 { PCI_VENDOR_ID_INTEL, 0x103A, PCI_ANY_ID, PCI_ANY_ID, },
2403 { PCI_VENDOR_ID_INTEL, 0x103B, PCI_ANY_ID, PCI_ANY_ID, },
2404 { PCI_VENDOR_ID_INTEL, 0x103C, PCI_ANY_ID, PCI_ANY_ID, },
2405 { PCI_VENDOR_ID_INTEL, 0x103D, PCI_ANY_ID, PCI_ANY_ID, },
2406 { PCI_VENDOR_ID_INTEL, 0x103E, PCI_ANY_ID, PCI_ANY_ID, },
2407 { PCI_VENDOR_ID_INTEL, 0x1050, PCI_ANY_ID, PCI_ANY_ID, },
2408 { PCI_VENDOR_ID_INTEL, 0x1059, PCI_ANY_ID, PCI_ANY_ID, },
2409 { PCI_VENDOR_ID_INTEL, 0x1227, PCI_ANY_ID, PCI_ANY_ID, },
2410 { PCI_VENDOR_ID_INTEL, 0x1228, PCI_ANY_ID, PCI_ANY_ID, },
2411 { PCI_VENDOR_ID_INTEL, 0x2449, PCI_ANY_ID, PCI_ANY_ID, },
2412 { PCI_VENDOR_ID_INTEL, 0x2459, PCI_ANY_ID, PCI_ANY_ID, },
2413 { PCI_VENDOR_ID_INTEL, 0x245D, PCI_ANY_ID, PCI_ANY_ID, },
2414 { PCI_VENDOR_ID_INTEL, 0x5200, PCI_ANY_ID, PCI_ANY_ID, },
2415 { PCI_VENDOR_ID_INTEL, 0x5201, PCI_ANY_ID, PCI_ANY_ID, },
2416 { 0,}
2417 };
2418 MODULE_DEVICE_TABLE(pci, eepro100_pci_tbl);
2420 static struct pci_driver eepro100_driver = {
2421 .name = "eepro100",
2422 .id_table = eepro100_pci_tbl,
2423 .probe = eepro100_init_one,
2424 .remove = __devexit_p(eepro100_remove_one),
2425 #ifdef CONFIG_PM
2426 .suspend = eepro100_suspend,
2427 .resume = eepro100_resume,
2428 #endif /* CONFIG_PM */
2429 };
2431 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,48)
2432 static int pci_module_init(struct pci_driver *pdev)
2433 {
2434 int rc;
2436 rc = pci_register_driver(pdev);
2437 if (rc <= 0) {
2438 printk(KERN_INFO "%s: No cards found, driver not installed.\n",
2439 pdev->name);
2440 pci_unregister_driver(pdev);
2441 return -ENODEV;
2442 }
2443 return 0;
2444 }
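/* Compatibility stub for pre-2.3.48 kernels that lack pci_module_init():
   a registration that binds no devices is treated as a failure so the
   module load fails cleanly with -ENODEV. */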
2445 #endif
2447 static int __init eepro100_init_module(void)
2448 {
2449 #ifdef MODULE
2450 printk(version);
2451 #endif
2452 return pci_module_init(&eepro100_driver);
2453 }
2455 static void __exit eepro100_cleanup_module(void)
2456 {
2457 pci_unregister_driver(&eepro100_driver);
2458 }
2460 module_init(eepro100_init_module);
2461 module_exit(eepro100_cleanup_module);
2463 /*
2464 * Local variables:
2465 * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
2466 * c-indent-level: 4
2467 * c-basic-offset: 4
2468 * tab-width: 4
2469 * End:
2470 */