Import 2.3.18pre1
[davej-history.git] / drivers / net / eepro100.c
blobe1a4a3c917841cf8678d96e449b976bac5e97f5e
1 /* drivers/net/eepro100.c: An Intel i82557-559 Ethernet driver for Linux. */
2 /*
3 NOTICE: this version tested with kernels 1.3.72 and later only!
4 Written 1996-1999 by Donald Becker.
6 This software may be used and distributed according to the terms
7 of the GNU Public License, incorporated herein by reference.
9 This driver is for the Intel EtherExpress Pro100 (Speedo3) design.
10 It should work with all i82557/558/559 boards.
12 To use as a module, use the compile-command at the end of the file.
14 The author may be reached as becker@CESDIS.usra.edu, or C/O
15 Center of Excellence in Space Data and Information Sciences
16 Code 930.5, NASA Goddard Space Flight Center, Greenbelt MD 20771
17 For updates see
18 http://cesdis.gsfc.nasa.gov/linux/drivers/eepro100.html
19 For installation instructions
20 http://cesdis.gsfc.nasa.gov/linux/misc/modules.html
21 There is a Majordomo mailing list based at
22 linux-eepro100@cesdis.gsfc.nasa.gov
25 static const char *version =
26 "eepro100.c:v1.09j 7/27/99 Donald Becker http://cesdis.gsfc.nasa.gov/linux/drivers/eepro100.html\n";
/* A few user-configurable values that apply to all boards.
   First set is undocumented and spelled per Intel recommendations. */
static int congenb = 0;		/* Enable congestion control in the DP83840. */
static int txfifo = 8;		/* Tx FIFO threshold in 4 byte units, 0-15 */
static int rxfifo = 8;		/* Rx FIFO threshold, default 32 bytes. */
/* Tx/Rx DMA burst length, 0-127, 0 == no preemption, tx==128 -> disabled. */
static int txdmacount = 128;
static int rxdmacount = 0;

/* Set the copy breakpoint for the copy-only-tiny-buffer Rx method.
   Lower values use more memory, but are faster. */
static int rx_copybreak = 200;

/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast) */
static int multicast_filter_limit = 64;

/* 'options' is used to pass a transceiver override or full-duplex flag
   e.g. "options=16" for FD, "options=32" for 100mbps-only.
   A -1 entry means "not set by the user"; one slot per probed card. */
static int full_duplex[] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1};
#ifdef MODULE
static int debug = -1;		/* The debug level */
#endif
56 /* A few values that may be tweaked. */
57 /* The ring sizes should be a power of two for efficiency. */
58 #define TX_RING_SIZE 32 /* Effectively 2 entries fewer. */
59 #define RX_RING_SIZE 32
60 /* Actual number of TX packets queued, must be <= TX_RING_SIZE-2. */
61 #define TX_QUEUE_LIMIT 12
63 /* Operational parameters that usually are not changed. */
65 /* Time in jiffies before concluding the transmitter is hung. */
66 #define TX_TIMEOUT (2*HZ)
67 /* Size of a pre-allocated Rx buffer: <Ethernet MTU> + slack.*/
68 #define PKT_BUF_SZ 1536
70 #if !defined(__OPTIMIZE__) || !defined(__KERNEL__)
71 #warning You must compile this file with the correct options!
72 #warning See the last lines of the source file.
73 #error You must compile this driver with "-O".
74 #endif
76 #include <linux/version.h>
77 #include <linux/module.h>
78 #ifdef MODVERSIONS
79 #include <linux/modversions.h>
80 #endif
82 #include <linux/kernel.h>
83 #include <linux/string.h>
84 #include <linux/timer.h>
85 #include <linux/errno.h>
86 #include <linux/ioport.h>
87 #include <linux/malloc.h>
88 #include <linux/interrupt.h>
89 #ifdef HAS_PCI_NETIF
90 #include "pci-netif.h"
91 #else
92 #include <linux/pci.h>
93 #if LINUX_VERSION_CODE < 0x20155
94 #include <linux/bios32.h> /* Ignore the bogus warning in 2.1.100+ */
95 #endif
96 #endif
97 #include <linux/spinlock.h>
98 #include <asm/bitops.h>
99 #include <asm/io.h>
101 #include <linux/netdevice.h>
102 #include <linux/etherdevice.h>
103 #include <linux/skbuff.h>
104 #include <linux/delay.h>
106 #if LINUX_VERSION_CODE > 0x20118 && defined(MODULE)
107 MODULE_AUTHOR("Donald Becker <becker@cesdis.gsfc.nasa.gov>");
108 MODULE_DESCRIPTION("Intel i82557/i82558 PCI EtherExpressPro driver");
109 MODULE_PARM(debug, "i");
110 MODULE_PARM(options, "1-" __MODULE_STRING(8) "i");
111 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(8) "i");
112 MODULE_PARM(congenb, "i");
113 MODULE_PARM(txfifo, "i");
114 MODULE_PARM(rxfifo, "i");
115 MODULE_PARM(txdmacount, "i");
116 MODULE_PARM(rxdmacount, "i");
117 MODULE_PARM(rx_copybreak, "i");
118 MODULE_PARM(max_interrupt_work, "i");
119 MODULE_PARM(multicast_filter_limit, "i");
120 #endif
122 #define RUN_AT(x) (jiffies + (x))
123 /* Condensed bus+endian portability operations. */
124 #define virt_to_le32bus(addr) cpu_to_le32(virt_to_bus(addr))
125 #define le32bus_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
127 #if (LINUX_VERSION_CODE < 0x20123)
128 #define test_and_set_bit(val, addr) set_bit(val, addr)
129 #define le16_to_cpu(val) (val)
130 #define cpu_to_le16(val) (val)
131 #define le32_to_cpu(val) (val)
132 #define cpu_to_le32(val) (val)
133 #define spin_lock_irqsave(&sp->lock, flags) save_flags(flags); cli();
134 #define spin_unlock_irqrestore(&sp->lock, flags); restore_flags(flags);
135 #endif
136 #if LINUX_VERSION_CODE < 0x20159
137 #define dev_free_skb(skb) dev_kfree_skb(skb, FREE_WRITE);
138 #else
139 #define dev_free_skb(skb) dev_kfree_skb(skb);
140 #endif
141 #if ! defined(CAP_NET_ADMIN)
142 #define capable(CAP_XXX) (suser())
143 #endif
144 #if ! defined(HAS_NETIF_QUEUE)
145 #define netif_wake_queue(dev) mark_bh(NET_BH);
146 #endif
148 /* The total I/O port extent of the board.
149 The registers beyond 0x18 only exist on the i82558. */
150 #define SPEEDO3_TOTAL_SIZE 0x20
/* Driver message verbosity; 0 is quiet, larger values print more.
   Deliberately non-static so it can be patched/inspected externally. */
int speedo_debug = 1;
155 Theory of Operation
157 I. Board Compatibility
159 This device driver is designed for the Intel i82557 "Speedo3" chip, Intel's
160 single-chip fast Ethernet controller for PCI, as used on the Intel
161 EtherExpress Pro 100 adapter.
163 II. Board-specific settings
165 PCI bus devices are configured by the system at boot time, so no jumpers
166 need to be set on the board. The system BIOS should be set to assign the
167 PCI INTA signal to an otherwise unused system IRQ line. While it's
168 possible to share PCI interrupt lines, it negatively impacts performance and
169 only recent kernels support it.
171 III. Driver operation
173 IIIA. General
174 The Speedo3 is very similar to other Intel network chips, that is to say
175 "apparently designed on a different planet". This chip retains the complex
176 Rx and Tx descriptors and multiple buffers pointers as previous chips, but
177 also has simplified Tx and Rx buffer modes. This driver uses the "flexible"
178 Tx mode, but in a simplified lower-overhead manner: it associates only a
179 single buffer descriptor with each frame descriptor.
181 Despite the extra space overhead in each receive skbuff, the driver must use
182 the simplified Rx buffer mode to assure that only a single data buffer is
183 associated with each RxFD. The driver implements this by reserving space
184 for the Rx descriptor at the head of each Rx skbuff.
186 The Speedo-3 has receive and command unit base addresses that are added to
187 almost all descriptor pointers. The driver sets these to zero, so that all
188 pointer fields are absolute addresses.
190 The System Control Block (SCB) of some previous Intel chips exists on the
191 chip in both PCI I/O and memory space. This driver uses the I/O space
192 registers, but might switch to memory mapped mode to better support non-x86
193 processors.
195 IIIB. Transmit structure
197 The driver must use the complex Tx command+descriptor mode in order to
198 have an indirect pointer to the skbuff data section. Each Tx command block
199 (TxCB) is associated with two immediately appended Tx Buffer Descriptor
200 (TxBD). A fixed ring of these TxCB+TxBD pairs are kept as part of the
201 speedo_private data structure for each adapter instance.
203 The newer i82558 explicitly supports this structure, and can read the two
204 TxBDs in the same PCI burst as the TxCB.
206 This ring structure is used for all normal transmit packets, but the
207 transmit packet descriptors aren't long enough for most non-Tx commands such
208 as CmdConfigure. This is complicated by the possibility that the chip has
209 already loaded the link address in the previous descriptor. So for these
210 commands we convert the next free descriptor on the ring to a NoOp, and point
211 that descriptor's link to the complex command.
213 An additional complexity of these non-transmit commands are that they may be
214 added asynchronous to the normal transmit queue, so we disable interrupts
215 whenever the Tx descriptor ring is manipulated.
217 A notable aspect of these special configure commands is that they do
218 work with the normal Tx ring entry scavenge method. The Tx ring scavenge
219 is done at interrupt time using the 'dirty_tx' index, and checking for the
220 command-complete bit. While the setup frames may have the NoOp command on the
221 Tx ring marked as complete, but not have completed the setup command, this
222 is not a problem. The tx_ring entry can be still safely reused, as the
223 tx_skbuff[] entry is always empty for config_cmd and mc_setup frames.
225 Commands may have bits set e.g. CmdSuspend in the command word to either
226 suspend or stop the transmit/command unit. This driver always flags the last
227 command with CmdSuspend, erases the CmdSuspend in the previous command, and
228 then issues a CU_RESUME.
229 Note: Watch out for the potential race condition here: imagine
230 erasing the previous suspend
231 the chip processes the previous command
232 the chip processes the final command, and suspends
233 doing the CU_RESUME
234 the chip processes the next-yet-valid post-final-command.
235 So blindly sending a CU_RESUME is only safe if we do it immediately after
236 after erasing the previous CmdSuspend, without the possibility of an
237 intervening delay. Thus the resume command is always within the
238 interrupts-disabled region. This is a timing dependence, but handling this
239 condition in a timing-independent way would considerably complicate the code.
241 Note: In previous generation Intel chips, restarting the command unit was a
242 notoriously slow process. This is presumably no longer true.
244 IIIC. Receive structure
246 Because of the bus-master support on the Speedo3 this driver uses the new
247 SKBUFF_RX_COPYBREAK scheme, rather than a fixed intermediate receive buffer.
248 This scheme allocates full-sized skbuffs as receive buffers. The value
249 SKBUFF_RX_COPYBREAK is used as the copying breakpoint: it is chosen to
250 trade-off the memory wasted by passing the full-sized skbuff to the queue
251 layer for all frames vs. the copying cost of copying a frame to a
252 correctly-sized skbuff.
254 For small frames the copying cost is negligible (esp. considering that we
255 are pre-loading the cache with immediately useful header information), so we
256 allocate a new, minimally-sized skbuff. For large frames the copying cost
257 is non-trivial, and the larger copy might flush the cache of useful data, so
258 we pass up the skbuff the packet was received into.
260 IIID. Synchronization
261 The driver runs as two independent, single-threaded flows of control. One
262 is the send-packet routine, which enforces single-threaded use by the
263 dev->tbusy flag. The other thread is the interrupt handler, which is single
264 threaded by the hardware and other software.
266 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
267 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
268 queue slot is empty, it clears the tbusy flag when finished otherwise it sets
269 the 'sp->tx_full' flag.
271 The interrupt handler has exclusive control over the Rx ring and records stats
272 from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
273 we can't avoid the interrupt overhead by having the Tx routine reap the Tx
274 stats.) After reaping the stats, it marks the queue entry as empty by setting
275 the 'base' to zero. Iff the 'sp->tx_full' flag is set, it clears both the
276 tx_full and tbusy flags.
278 IV. Notes
280 Thanks to Steve Williams of Intel for arranging the non-disclosure agreement
281 that stated that I could disclose the information. But I still resent
282 having to sign an Intel NDA when I'm helping Intel sell their own product!
286 /* This table drives the PCI probe routines. */
287 static struct net_device *
288 speedo_found1(int pci_bus, int pci_devfn, struct net_device *dev,
289 long ioaddr, int irq, int chip_idx, int fnd_cnt);
291 #ifdef USE_IO
292 #define SPEEDO_IOTYPE PCI_USES_MASTER|PCI_USES_IO|PCI_ADDR1
293 #define SPEEDO_SIZE 32
294 #else
295 #define SPEEDO_IOTYPE PCI_USES_MASTER|PCI_USES_MEM|PCI_ADDR0
296 #define SPEEDO_SIZE 0x1000
297 #endif
299 #if defined(HAS_PCI_NETIF)
300 struct pci_id_info static pci_tbl[] = {
301 { "Intel PCI EtherExpress Pro100",
302 { 0x12298086, 0xffffffff,}, SPEEDO_IOTYPE, SPEEDO_SIZE,
303 0, speedo_found1 },
304 {0,}, /* 0 terminated list. */
306 #else
307 enum pci_flags_bit {
308 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
309 PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
311 struct pci_id_info {
312 const char *name;
313 u16 vendor_id, device_id, device_id_mask, flags;
314 int io_size;
315 struct net_device *(*probe1)(int pci_bus, int pci_devfn, struct net_device *dev,
316 long ioaddr, int irq, int chip_idx, int fnd_cnt);
317 } static pci_tbl[] = {
318 { "Intel PCI EtherExpress Pro100",
319 0x8086, 0x1229, 0xffff, PCI_USES_IO|PCI_USES_MASTER, 32, speedo_found1 },
320 {0,}, /* 0 terminated list. */
322 #endif
324 #ifndef USE_IO
325 #define inb readb
326 #define inw readw
327 #define inl readl
328 #define outb writeb
329 #define outw writew
330 #define outl writel
331 #endif
/* Busy-poll until the command unit accepts the previous command (the
   SCB command byte reads back zero).  Typically completes immediately;
   the 100-iteration bound keeps a wedged chip from hanging the kernel. */
static inline void wait_for_cmd_done(long cmd_ioaddr)
{
	int wait = 100;
	while (inb(cmd_ioaddr) && --wait >= 0)
		;
}
/* Offsets to the various registers.
   All accesses need not be longword aligned. */
enum speedo_offsets {
	SCBStatus = 0, SCBCmd = 2,	/* Rx/Command Unit command and status. */
	SCBPointer = 4,			/* General purpose pointer. */
	SCBPort = 8,			/* Misc. commands and operands. */
	SCBflash = 12, SCBeeprom = 14,	/* EEPROM and flash memory control. */
	SCBCtrlMDI = 16,		/* MDI interface control. */
	SCBEarlyRx = 20,		/* Early receive byte count. */
};
/* Commands that can be put in a command list entry. */
enum commands {
	CmdNOp = 0, CmdIASetup = 0x10000, CmdConfigure = 0x20000,
	CmdMulticastList = 0x30000, CmdTx = 0x40000, CmdTDR = 0x50000,
	CmdDump = 0x60000, CmdDiagnose = 0x70000,
	CmdSuspend = 0x40000000,	/* Suspend after completion. */
	CmdIntr = 0x20000000,		/* Interrupt after completion. */
	CmdTxFlex = 0x00080000,		/* Use "Flexible mode" for CmdTx command. */
};
361 /* Do atomically if possible. */
362 #if defined(__i386__) || defined(__alpha__)
363 #define clear_suspend(cmd) clear_bit(30, &(cmd)->cmd_status)
364 #elif defined(__powerpc__)
365 #define clear_suspend(cmd) clear_bit(6, &(cmd)->cmd_status)
366 #else
367 #define clear_suspend(cmd) (cmd)->cmd_status &= cpu_to_le32(~CmdSuspend)
368 #endif
/* Bits of the SCB command word: interrupt masks plus CU/RU commands. */
enum SCBCmdBits {
	SCBMaskCmdDone=0x8000, SCBMaskRxDone=0x4000, SCBMaskCmdIdle=0x2000,
	SCBMaskRxSuspend=0x1000, SCBMaskEarlyRx=0x0800, SCBMaskFlowCtl=0x0400,
	SCBTriggerIntr=0x0200, SCBMaskAll=0x0100,
	/* The rest are Rx and Tx commands. */
	CUStart=0x0010, CUResume=0x0020, CUStatsAddr=0x0040, CUShowStats=0x0050,
	CUCmdBase=0x0060,	/* CU Base address (set to zero) . */
	CUDumpStats=0x0070,	/* Dump then reset stats counters. */
	RxStart=0x0001, RxResume=0x0002, RxAbort=0x0004, RxAddrLoad=0x0006,
	RxResumeNoResources=0x0007,
};
/* Commands written to the SCBPort register. */
enum SCBPort_cmds {
	PortReset = 0, PortSelfTest = 1, PortPartialReset = 2, PortDump = 3,
};
386 /* The Speedo3 Rx and Tx frame/buffer descriptors. */
387 struct descriptor { /* A generic descriptor. */
388 s32 cmd_status; /* All command and status fields. */
389 u32 link; /* struct descriptor * */
390 unsigned char params[0];
393 /* The Speedo3 Rx and Tx buffer descriptors. */
394 struct RxFD { /* Receive frame descriptor. */
395 s32 status;
396 u32 link; /* struct RxFD * */
397 u32 rx_buf_addr; /* void * */
398 u32 count;
/* Selected elements of the Tx/RxFD.status word. */
enum RxFD_bits {
	RxComplete=0x8000, RxOK=0x2000,
	RxErrCRC=0x0800, RxErrAlign=0x0400, RxErrTooBig=0x0200, RxErrSymbol=0x0010,
	RxEth2Type=0x0020, RxNoMatch=0x0004, RxNoIAMatch=0x0002,
	TxUnderrun=0x1000, StatusComplete=0x8000,
};
409 struct TxFD { /* Transmit frame descriptor set. */
410 s32 status;
411 u32 link; /* void * */
412 u32 tx_desc_addr; /* Always points to the tx_buf_addr element. */
413 s32 count; /* # of TBD (=1), Tx start thresh., etc. */
414 /* This constitutes two "TBD" entries -- we only use one. */
415 u32 tx_buf_addr0; /* void *, frame to be transmitted. */
416 s32 tx_buf_size0; /* Length of Tx frame. */
417 u32 tx_buf_addr1; /* void *, frame to be transmitted. */
418 s32 tx_buf_size1; /* Length of Tx frame. */
421 /* Elements of the dump_statistics block. This block must be lword aligned. */
422 struct speedo_stats {
423 u32 tx_good_frames;
424 u32 tx_coll16_errs;
425 u32 tx_late_colls;
426 u32 tx_underruns;
427 u32 tx_lost_carrier;
428 u32 tx_deferred;
429 u32 tx_one_colls;
430 u32 tx_multi_colls;
431 u32 tx_total_colls;
432 u32 rx_good_frames;
433 u32 rx_crc_errs;
434 u32 rx_align_errs;
435 u32 rx_resource_errs;
436 u32 rx_overrun_errs;
437 u32 rx_colls_errs;
438 u32 rx_runt_errs;
439 u32 done_marker;
442 /* Do not change the position (alignment) of the first few elements!
443 The later elements are grouped for cache locality. */
444 struct speedo_private {
445 struct TxFD tx_ring[TX_RING_SIZE]; /* Commands (usually CmdTxPacket). */
446 struct RxFD *rx_ringp[RX_RING_SIZE]; /* Rx descriptor, used as ring. */
447 /* The addresses of a Tx/Rx-in-place packets/buffers. */
448 struct sk_buff* tx_skbuff[TX_RING_SIZE];
449 struct sk_buff* rx_skbuff[RX_RING_SIZE];
450 struct descriptor *last_cmd; /* Last command sent. */
451 unsigned int cur_tx, dirty_tx; /* The ring entries to be free()ed. */
452 spinlock_t lock; /* Group with Tx control cache line. */
453 u32 tx_threshold; /* The value for txdesc.count. */
454 struct RxFD *last_rxf; /* Last command sent. */
455 unsigned int cur_rx, dirty_rx; /* The next free ring entry */
456 long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */
457 const char *product_name;
458 struct net_device *next_module;
459 void *priv_addr; /* Unaligned address for kfree */
460 struct enet_statistics stats;
461 struct speedo_stats lstats;
462 int chip_id;
463 unsigned char pci_bus, pci_devfn, acpi_pwr;
464 struct timer_list timer; /* Media selection timer. */
465 int mc_setup_frm_len; /* The length of an allocated.. */
466 struct descriptor *mc_setup_frm; /* ..multicast setup frame. */
467 int mc_setup_busy; /* Avoid double-use of setup frame. */
468 int in_interrupt; /* Word-aligned dev->interrupt */
469 char rx_mode; /* Current PROMISC/ALLMULTI setting. */
470 unsigned int tx_full:1; /* The Tx queue is full. */
471 unsigned int full_duplex:1; /* Full-duplex operation requested. */
472 unsigned int flow_ctrl:1; /* Use 802.3x flow control. */
473 unsigned int rx_bug:1; /* Work around receiver hang errata. */
474 unsigned int rx_bug10:1; /* Receiver might hang at 10mbps. */
475 unsigned int rx_bug100:1; /* Receiver might hang at 100mbps. */
476 unsigned char default_port:8; /* Last dev->if_port value. */
477 unsigned short phy[2]; /* PHY media interfaces available. */
478 unsigned short advertising; /* Current PHY advertised caps. */
479 unsigned short partner; /* Link partner caps. */
480 long last_reset;
/* The parameters for a CmdConfigure operation.
   There are so many options that it would be difficult to document each bit.
   We mostly use the default or recommended settings.
   Made 'static': this table is used only inside this file, and without
   internal linkage it needlessly pollutes the kernel global namespace. */
static const char i82557_config_cmd[22] = {
	22, 0x08, 0, 0,  0, 0, 0x32, 0x03,  1, /* 1=Use MII  0=Use AUI */
	0, 0x2E, 0,  0x60, 0,
	0xf2, 0x48,   0, 0x40, 0xf2, 0x80, 	/* 0x40=Force full-duplex */
	0x3f, 0x05, };
/* CmdConfigure parameter block for the newer i82558.
   Made 'static': file-local table; external linkage was a namespace leak. */
static const char i82558_config_cmd[22] = {
	22, 0x08, 0, 1,  0, 0, 0x22, 0x03,  1, /* 1=Use MII  0=Use AUI */
	0, 0x2E, 0,  0x60, 0x08, 0x88,
	0x68, 0, 0x40, 0xf2, 0xBD, 		/* 0xBD->0xFD=Force full-duplex */
	0x31, 0x05, };
/* PHY media interface chips, indexed by the 4-bit PHY type read from
   EEPROM word 6; is_mii[] flags which of the first eight speak MII. */
static const char *phys[] = {
	"None", "i82553-A/B", "i82553-C", "i82503",
	"DP83840", "80c240", "80c24", "i82555",
	"unknown-8", "unknown-9", "DP83840A", "unknown-11",
	"unknown-12", "unknown-13", "unknown-14", "unknown-15", };
enum phy_chips { NonSuchPhy=0, I82553AB, I82553C, I82503, DP83840, S80C240,
				 S80C24, I82555, DP83840A=10, };
static const char is_mii[] = { 0, 1, 1, 0, 1, 1, 0, 1 };
#define EE_READ_CMD		(6)
506 #define EE_READ_CMD (6)
508 static int do_eeprom_cmd(long ioaddr, int cmd, int cmd_len);
509 static int mdio_read(long ioaddr, int phy_id, int location);
510 static int mdio_write(long ioaddr, int phy_id, int location, int value);
511 static int speedo_open(struct net_device *dev);
512 static void speedo_resume(struct net_device *dev);
513 static void speedo_timer(unsigned long data);
514 static void speedo_init_rx_ring(struct net_device *dev);
515 static void speedo_tx_timeout(struct net_device *dev);
516 static int speedo_start_xmit(struct sk_buff *skb, struct net_device *dev);
517 static int speedo_rx(struct net_device *dev);
518 static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
519 static int speedo_close(struct net_device *dev);
520 static struct enet_statistics *speedo_get_stats(struct net_device *dev);
521 static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
522 static void set_rx_mode(struct net_device *dev);
#ifdef honor_default_port
/* Optional driver feature to allow forcing the transceiver setting.
   Not recommended.  Indexed by dev->default_port; values are MII BMCR
   register settings. */
static int mii_ctrl[8] = { 0x3300, 0x3100, 0x0000, 0x0100,
						   0x2000, 0x2100, 0x0400, 0x3100};
#endif

/* A list of all installed Speedo devices, for removing the driver module. */
static struct net_device *root_speedo_dev = NULL;
536 #if ! defined(HAS_PCI_NETIF)
537 int eepro100_init(struct net_device *dev)
539 int cards_found = 0;
540 static int pci_index = 0;
542 if (! pcibios_present())
543 return cards_found;
545 for (; pci_index < 8; pci_index++) {
546 unsigned char pci_bus, pci_device_fn, pci_latency;
547 u32 pciaddr;
548 long ioaddr;
549 int irq;
551 u16 pci_command, new_command;
553 if (pcibios_find_device(PCI_VENDOR_ID_INTEL,
554 PCI_DEVICE_ID_INTEL_82557,
555 pci_index, &pci_bus,
556 &pci_device_fn))
557 break;
558 #if LINUX_VERSION_CODE >= 0x20155 || PCI_SUPPORT_1
560 struct pci_dev *pdev = pci_find_slot(pci_bus, pci_device_fn);
561 #ifdef USE_IO
562 pciaddr = pdev->resource[1].start;
563 #else
564 pciaddr = pdev->resource[0].start;
565 #endif
566 irq = pdev->irq;
568 #else
570 u8 pci_irq_line;
571 pcibios_read_config_byte(pci_bus, pci_device_fn,
572 PCI_INTERRUPT_LINE, &pci_irq_line);
573 /* Note: BASE_ADDRESS_0 is for memory-mapping the registers. */
574 #ifdef USE_IO
575 pcibios_read_config_dword(pci_bus, pci_device_fn,
576 PCI_BASE_ADDRESS_1, &pciaddr);
577 #else
578 pcibios_read_config_dword(pci_bus, pci_device_fn,
579 PCI_BASE_ADDRESS_0, &pciaddr);
580 #endif
581 irq = pci_irq_line;
583 #endif
584 /* Remove I/O space marker in bit 0. */
585 if (pciaddr & 1) {
586 ioaddr = pciaddr & ~3UL;
587 if (check_region(ioaddr, 32))
588 continue;
589 } else if ((ioaddr = (long)ioremap(pciaddr & ~0xfUL, 0x1000)) == 0) {
590 printk(KERN_INFO "Failed to map PCI address %#x.\n",
591 pciaddr);
592 continue;
594 if (speedo_debug > 2)
595 printk("Found Intel i82557 PCI Speedo at I/O %#lx, IRQ %d.\n",
596 ioaddr, irq);
598 /* Get and check the bus-master and latency values. */
599 pcibios_read_config_word(pci_bus, pci_device_fn,
600 PCI_COMMAND, &pci_command);
601 new_command = pci_command | PCI_COMMAND_MASTER|PCI_COMMAND_IO;
602 if (pci_command != new_command) {
603 printk(KERN_INFO " The PCI BIOS has not enabled this"
604 " device! Updating PCI command %4.4x->%4.4x.\n",
605 pci_command, new_command);
606 pcibios_write_config_word(pci_bus, pci_device_fn,
607 PCI_COMMAND, new_command);
609 pcibios_read_config_byte(pci_bus, pci_device_fn,
610 PCI_LATENCY_TIMER, &pci_latency);
611 if (pci_latency < 32) {
612 printk(" PCI latency timer (CFLT) is unreasonably low at %d."
613 " Setting to 32 clocks.\n", pci_latency);
614 pcibios_write_config_byte(pci_bus, pci_device_fn,
615 PCI_LATENCY_TIMER, 32);
616 } else if (speedo_debug > 1)
617 printk(" PCI latency timer (CFLT) is %#x.\n", pci_latency);
619 speedo_found1(pci_bus, pci_device_fn, dev, ioaddr, irq, 0,cards_found);
620 dev = NULL;
621 cards_found++;
624 return cards_found;
626 #endif
628 static struct net_device *
629 speedo_found1(int pci_bus, int pci_devfn, struct net_device *dev,
630 long ioaddr, int irq, int chip_idx, int card_idx)
632 struct speedo_private *sp;
633 const char *product;
634 int i, option;
635 u16 eeprom[0x100];
636 int acpi_idle_state = 0;
637 #ifndef MODULE
638 static int did_version = 0; /* Already printed version info. */
639 if (speedo_debug > 0 && did_version++ == 0)
640 printk(version);
641 #endif
643 dev = init_etherdev(dev, sizeof(struct speedo_private));
645 if (dev->mem_start > 0)
646 option = dev->mem_start;
647 else if (card_idx >= 0 && options[card_idx] >= 0)
648 option = options[card_idx];
649 else
650 option = 0;
652 #if defined(HAS_PCI_NETIF)
653 acpi_idle_state = acpi_set_pwr_state(pci_bus, pci_devfn, ACPI_D0);
654 #endif
656 /* Read the station address EEPROM before doing the reset.
657 Nominally his should even be done before accepting the device, but
658 then we wouldn't have a device name with which to report the error.
659 The size test is for 6 bit vs. 8 bit address serial EEPROMs.
662 u16 sum = 0;
663 int j;
664 int read_cmd, ee_size;
666 if ((do_eeprom_cmd(ioaddr, EE_READ_CMD << 24, 27) & 0xffe0000)
667 == 0xffe0000) {
668 ee_size = 0x100;
669 read_cmd = EE_READ_CMD << 24;
670 } else {
671 ee_size = 0x40;
672 read_cmd = EE_READ_CMD << 22;
675 for (j = 0, i = 0; i < ee_size; i++) {
676 u16 value = do_eeprom_cmd(ioaddr, read_cmd | (i << 16), 27);
677 eeprom[i] = value;
678 sum += value;
679 if (i < 3) {
680 dev->dev_addr[j++] = value;
681 dev->dev_addr[j++] = value >> 8;
684 if (sum != 0xBABA)
685 printk(KERN_WARNING "%s: Invalid EEPROM checksum %#4.4x, "
686 "check settings before activating this device!\n",
687 dev->name, sum);
688 /* Don't unregister_netdev(dev); as the EEPro may actually be
689 usable, especially if the MAC address is set later. */
692 /* Reset the chip: stop Tx and Rx processes and clear counters.
693 This takes less than 10usec and will easily finish before the next
694 action. */
695 outl(PortReset, ioaddr + SCBPort);
697 if (eeprom[3] & 0x0100)
698 product = "OEM i82557/i82558 10/100 Ethernet";
699 else
700 product = pci_tbl[chip_idx].name;
702 printk(KERN_INFO "%s: %s at %#3lx, ", dev->name, product, ioaddr);
704 for (i = 0; i < 5; i++)
705 printk("%2.2X:", dev->dev_addr[i]);
706 printk("%2.2X, IRQ %d.\n", dev->dev_addr[i], irq);
708 #ifndef kernel_bloat
709 /* OK, this is pure kernel bloat. I don't like it when other drivers
710 waste non-pageable kernel space to emit similar messages, but I need
711 them for bug reports. */
713 const char *connectors[] = {" RJ45", " BNC", " AUI", " MII"};
714 /* The self-test results must be paragraph aligned. */
715 s32 str[6], *volatile self_test_results;
716 int boguscnt = 16000; /* Timeout for set-test. */
717 if (eeprom[3] & 0x03)
718 printk(KERN_INFO " Receiver lock-up bug exists -- enabling"
719 " work-around.\n");
720 printk(KERN_INFO " Board assembly %4.4x%2.2x-%3.3d, Physical"
721 " connectors present:",
722 eeprom[8], eeprom[9]>>8, eeprom[9] & 0xff);
723 for (i = 0; i < 4; i++)
724 if (eeprom[5] & (1<<i))
725 printk(connectors[i]);
726 printk("\n"KERN_INFO" Primary interface chip %s PHY #%d.\n",
727 phys[(eeprom[6]>>8)&15], eeprom[6] & 0x1f);
728 if (eeprom[7] & 0x0700)
729 printk(KERN_INFO " Secondary interface chip %s.\n",
730 phys[(eeprom[7]>>8)&7]);
731 if (((eeprom[6]>>8) & 0x3f) == DP83840
732 || ((eeprom[6]>>8) & 0x3f) == DP83840A) {
733 int mdi_reg23 = mdio_read(ioaddr, eeprom[6] & 0x1f, 23) | 0x0422;
734 if (congenb)
735 mdi_reg23 |= 0x0100;
736 printk(KERN_INFO" DP83840 specific setup, setting register 23 to %4.4x.\n",
737 mdi_reg23);
738 mdio_write(ioaddr, eeprom[6] & 0x1f, 23, mdi_reg23);
740 if ((option >= 0) && (option & 0x70)) {
741 printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
742 (option & 0x20 ? 100 : 10),
743 (option & 0x10 ? "full" : "half"));
744 mdio_write(ioaddr, eeprom[6] & 0x1f, 0,
745 ((option & 0x20) ? 0x2000 : 0) | /* 100mbps? */
746 ((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */
749 /* Perform a system self-test. */
750 self_test_results = (s32*) ((((long) str) + 15) & ~0xf);
751 self_test_results[0] = 0;
752 self_test_results[1] = -1;
753 outl(virt_to_bus(self_test_results) | PortSelfTest, ioaddr + SCBPort);
754 do {
755 udelay(10);
756 } while (self_test_results[1] == -1 && --boguscnt >= 0);
758 if (boguscnt < 0) { /* Test optimized out. */
759 printk(KERN_ERR "Self test failed, status %8.8x:\n"
760 KERN_ERR " Failure to initialize the i82557.\n"
761 KERN_ERR " Verify that the card is a bus-master"
762 " capable slot.\n",
763 self_test_results[1]);
764 } else
765 printk(KERN_INFO " General self-test: %s.\n"
766 KERN_INFO " Serial sub-system self-test: %s.\n"
767 KERN_INFO " Internal registers self-test: %s.\n"
768 KERN_INFO " ROM checksum self-test: %s (%#8.8x).\n",
769 self_test_results[1] & 0x1000 ? "failed" : "passed",
770 self_test_results[1] & 0x0020 ? "failed" : "passed",
771 self_test_results[1] & 0x0008 ? "failed" : "passed",
772 self_test_results[1] & 0x0004 ? "failed" : "passed",
773 self_test_results[0]);
775 #endif /* kernel_bloat */
777 outl(PortReset, ioaddr + SCBPort);
778 #if defined(HAS_PCI_NETIF)
779 /* Return the chip to its original power state. */
780 acpi_set_pwr_state(pci_bus, pci_devfn, acpi_idle_state);
781 #endif
783 /* We do a request_region() only to register /proc/ioports info. */
784 request_region(ioaddr, SPEEDO3_TOTAL_SIZE, "Intel Speedo3 Ethernet");
786 dev->base_addr = ioaddr;
787 dev->irq = irq;
789 sp = dev->priv;
790 if (dev->priv == NULL) {
791 void *mem = kmalloc(sizeof(*sp), GFP_KERNEL);
792 dev->priv = sp = mem; /* Cache align here if kmalloc does not. */
793 sp->priv_addr = mem;
795 memset(sp, 0, sizeof(*sp));
796 sp->next_module = root_speedo_dev;
797 root_speedo_dev = dev;
799 sp->pci_bus = pci_bus;
800 sp->pci_devfn = pci_devfn;
801 sp->chip_id = chip_idx;
802 sp->acpi_pwr = acpi_idle_state;
804 sp->full_duplex = option >= 0 && (option & 0x10) ? 1 : 0;
805 if (card_idx >= 0) {
806 if (full_duplex[card_idx] >= 0)
807 sp->full_duplex = full_duplex[card_idx];
809 sp->default_port = option >= 0 ? (option & 0x0f) : 0;
811 sp->phy[0] = eeprom[6];
812 sp->phy[1] = eeprom[7];
813 sp->rx_bug = (eeprom[3] & 0x03) == 3 ? 0 : 1;
815 if (sp->rx_bug)
816 printk(KERN_INFO " Receiver lock-up workaround activated.\n");
818 /* The Speedo-specific entries in the device structure. */
819 dev->open = &speedo_open;
820 dev->hard_start_xmit = &speedo_start_xmit;
821 dev->stop = &speedo_close;
822 dev->get_stats = &speedo_get_stats;
823 dev->set_multicast_list = &set_rx_mode;
824 dev->do_ioctl = &speedo_ioctl;
826 return dev;
829 /* Serial EEPROM section.
830 A "bit" grungy, but we work our way through bit-by-bit :->. */
831 /* EEPROM_Ctrl bits. */
832 #define EE_SHIFT_CLK 0x01 /* EEPROM shift clock. */
833 #define EE_CS 0x02 /* EEPROM chip select. */
834 #define EE_DATA_WRITE 0x04 /* EEPROM chip data in. */
835 #define EE_DATA_READ 0x08 /* EEPROM chip data out. */
836 #define EE_ENB (0x4800 | EE_CS)
837 #define EE_WRITE_0 0x4802
838 #define EE_WRITE_1 0x4806
839 #define EE_OFFSET SCBeeprom
841 /* Delay between EEPROM clock transitions.
842 The code works with no delay on 33Mhz PCI. */
843 #define eeprom_delay() inw(ee_addr)
845 static int do_eeprom_cmd(long ioaddr, int cmd, int cmd_len)
847 unsigned retval = 0;
848 long ee_addr = ioaddr + SCBeeprom;
850 outw(EE_ENB | EE_SHIFT_CLK, ee_addr);
852 /* Shift the command bits out. */
853 do {
854 short dataval = (cmd & (1 << cmd_len)) ? EE_WRITE_1 : EE_WRITE_0;
855 outw(dataval, ee_addr);
856 eeprom_delay();
857 outw(dataval | EE_SHIFT_CLK, ee_addr);
858 eeprom_delay();
859 retval = (retval << 1) | ((inw(ee_addr) & EE_DATA_READ) ? 1 : 0);
860 } while (--cmd_len >= 0);
861 outw(EE_ENB, ee_addr);
863 /* Terminate the EEPROM access. */
864 outw(EE_ENB & ~EE_CS, ee_addr);
865 return retval;
868 static int mdio_read(long ioaddr, int phy_id, int location)
870 int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
871 outl(0x08000000 | (location<<16) | (phy_id<<21), ioaddr + SCBCtrlMDI);
872 do {
873 val = inl(ioaddr + SCBCtrlMDI);
874 if (--boguscnt < 0) {
875 printk(KERN_ERR " mdio_read() timed out with val = %8.8x.\n", val);
876 break;
878 } while (! (val & 0x10000000));
879 return val & 0xffff;
882 static int mdio_write(long ioaddr, int phy_id, int location, int value)
884 int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
885 outl(0x04000000 | (location<<16) | (phy_id<<21) | value,
886 ioaddr + SCBCtrlMDI);
887 do {
888 val = inl(ioaddr + SCBCtrlMDI);
889 if (--boguscnt < 0) {
890 printk(KERN_ERR" mdio_write() timed out with val = %8.8x.\n", val);
891 break;
893 } while (! (val & 0x10000000));
894 return val & 0xffff;
/* Bring the interface up: power the chip on, reset the Tx bookkeeping,
   claim the (shared) IRQ, build the Rx ring, start the hardware, program
   the Rx mode, and arm the media-monitor timer.  Returns 0 or -EAGAIN if
   the IRQ could not be obtained. */
898 static int
899 speedo_open(struct net_device *dev)
901 struct speedo_private *sp = (struct speedo_private *)dev->priv;
902 long ioaddr = dev->base_addr;
904 #if defined(HAS_PCI_NETIF)
905 acpi_set_pwr_state(sp->pci_bus, sp->pci_devfn, ACPI_D0);
906 #endif
908 if (speedo_debug > 1)
909 printk(KERN_DEBUG "%s: speedo_open() irq %d.\n", dev->name, dev->irq);
911 /* Set up the Tx queue early.. */
912 sp->cur_tx = 0;
913 sp->dirty_tx = 0;
914 sp->last_cmd = 0;
915 sp->tx_full = 0;
916 sp->lock = (spinlock_t) SPIN_LOCK_UNLOCKED;
917 sp->in_interrupt = 0;
919 /* .. we can safely take handler calls during init. */
920 if (request_irq(dev->irq, &speedo_interrupt, SA_SHIRQ, dev->name, dev)) {
921 return -EAGAIN;
923 MOD_INC_USE_COUNT;
925 dev->if_port = sp->default_port;
926 #if 0
927 /* With some transceivers we must retrigger negotiation to reset
928 power-up errors. */
929 if ((sp->phy[0] & 0x8000) == 0) {
930 int phy_addr = sp->phy[0] & 0x1f ;
931 /* Use 0x3300 for restarting NWay, other values to force xcvr:
932 0x0000 10-HD
933 0x0100 10-FD
934 0x2000 100-HD
935 0x2100 100-FD */
937 #ifdef honor_default_port
938 mdio_write(ioaddr, phy_addr, 0, mii_ctrl[dev->default_port & 7]);
939 #else
940 mdio_write(ioaddr, phy_addr, 0, 0x3300);
941 #endif
943 #endif
945 speedo_init_rx_ring(dev);
947 /* Fire up the hardware. */
948 speedo_resume(dev);
950 dev->tbusy = 0;
951 dev->interrupt = 0;
952 dev->start = 1;
954 /* Setup the chip and configure the multicast list. */
955 sp->mc_setup_frm = NULL;
956 sp->mc_setup_frm_len = 0;
957 sp->mc_setup_busy = 0;
958 sp->rx_mode = -1; /* Invalid -> always reset the mode. */
959 sp->flow_ctrl = sp->partner = 0;
960 set_rx_mode(dev);
/* MII PHY present (bit 15 of phy[0] clear): cache our advertised
   abilities from MII register 4 for the flow-control check in the timer. */
961 if ((sp->phy[0] & 0x8000) == 0)
962 sp->advertising = mdio_read(ioaddr, sp->phy[0] & 0x1f, 4);
964 if (speedo_debug > 2) {
965 printk(KERN_DEBUG "%s: Done speedo_open(), status %8.8x.\n",
966 dev->name, inw(ioaddr + SCBStatus));
969 /* Set the timer. The timer serves a dual purpose:
970 1) to monitor the media interface (e.g. link beat) and perhaps switch
971 to an alternate media type
972 2) to monitor Rx activity, and restart the Rx process if the receiver
973 hangs. */
974 init_timer(&sp->timer);
975 sp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */
976 sp->timer.data = (unsigned long)dev;
977 sp->timer.function = &speedo_timer; /* timer handler */
978 add_timer(&sp->timer);
980 /* No need to wait for the command unit to accept here. */
981 if ((sp->phy[0] & 0x8000) == 0)
982 mdio_read(ioaddr, sp->phy[0] & 0x1f, 0);
983 return 0;
986 /* Start the chip hardware after a full reset. */
/* Sequence: mask interrupts, zero the base/segment registers, load the
   stats block and Rx ring addresses, queue an IASetup command carrying
   the station address, then start the command unit on it.  Each SCB
   command must wait for the previous one to be accepted. */
987 static void speedo_resume(struct net_device *dev)
989 struct speedo_private *sp = (struct speedo_private *)dev->priv;
990 long ioaddr = dev->base_addr;
992 outw(SCBMaskAll, ioaddr + SCBCmd);
994 /* Start with a Tx threshold of 256 (0x..20.... 8 byte units). */
995 sp->tx_threshold = 0x01208000;
997 /* Set the segment registers to '0'. */
998 wait_for_cmd_done(ioaddr + SCBCmd);
999 outl(0, ioaddr + SCBPointer);
1000 outb(RxAddrLoad, ioaddr + SCBCmd);
1001 wait_for_cmd_done(ioaddr + SCBCmd);
1002 outb(CUCmdBase, ioaddr + SCBCmd);
1003 wait_for_cmd_done(ioaddr + SCBCmd);
1005 /* Load the statistics block and rx ring addresses. */
1006 outl(virt_to_bus(&sp->lstats), ioaddr + SCBPointer);
1007 outb(CUStatsAddr, ioaddr + SCBCmd);
1008 sp->lstats.done_marker = 0;
1009 wait_for_cmd_done(ioaddr + SCBCmd);
1011 outl(virt_to_bus(sp->rx_ringp[sp->cur_rx % RX_RING_SIZE]),
1012 ioaddr + SCBPointer);
1013 outb(RxStart, ioaddr + SCBCmd);
1014 wait_for_cmd_done(ioaddr + SCBCmd);
1016 outb(CUDumpStats, ioaddr + SCBCmd);
1018 /* Fill the first command with our physical address. */
1020 int entry = sp->cur_tx++ % TX_RING_SIZE;
1021 struct descriptor *cur_cmd = (struct descriptor *)&sp->tx_ring[entry];
1023 /* Avoid a bug(?!) here by marking the command already completed. */
1024 cur_cmd->cmd_status = cpu_to_le32((CmdSuspend | CmdIASetup) | 0xa000);
1025 cur_cmd->link =
1026 virt_to_le32bus(&sp->tx_ring[sp->cur_tx % TX_RING_SIZE]);
1027 memcpy(cur_cmd->params, dev->dev_addr, 6);
/* Unsuspend the previous command chain, if any, so it runs into ours. */
1028 if (sp->last_cmd)
1029 clear_suspend(sp->last_cmd);
1030 sp->last_cmd = cur_cmd;
1033 /* Start the chip's Tx process and unmask interrupts. */
1034 wait_for_cmd_done(ioaddr + SCBCmd);
1035 outl(virt_to_bus(&sp->tx_ring[sp->dirty_tx % TX_RING_SIZE]),
1036 ioaddr + SCBPointer);
1037 outw(CUStart, ioaddr + SCBCmd);
1040 /* Media monitoring and control. */
/* Periodic timer: tracks the MII link partner (register 5) to detect
   flow-control changes, updates IFF_RUNNING from link status (register 1),
   fires the Tx-timeout path if the queue is stuck, and re-issues
   set_rx_mode() to clear the i82557 receiver-hang bug.  Re-arms itself. */
1041 static void speedo_timer(unsigned long data)
1043 struct net_device *dev = (struct net_device *)data;
1044 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1045 long ioaddr = dev->base_addr;
1046 int phy_num = sp->phy[0] & 0x1f;
1048 /* We have MII and lost link beat. */
1049 if ((sp->phy[0] & 0x8000) == 0) {
1050 int partner = mdio_read(ioaddr, phy_num, 5);
1051 if (partner != sp->partner) {
/* 0x0400 = pause capability bit; flow control is on only if both
   sides advertise it. */
1052 int flow_ctrl = sp->advertising & partner & 0x0400 ? 1 : 0;
1053 sp->partner = partner;
1054 if (flow_ctrl != sp->flow_ctrl) {
1055 sp->flow_ctrl = flow_ctrl;
1056 sp->rx_mode = -1; /* Trigger a reload. */
1058 /* Clear sticky bit. */
1059 mdio_read(ioaddr, phy_num, 1);
1060 /* If link beat has returned... */
1061 if (mdio_read(ioaddr, phy_num, 1) & 0x0004)
1062 dev->flags |= IFF_RUNNING;
1063 else
1064 dev->flags &= ~IFF_RUNNING;
1068 if (speedo_debug > 3) {
1069 printk(KERN_DEBUG "%s: Media control tick, status %4.4x.\n",
1070 dev->name, inw(ioaddr + SCBStatus));
1072 /* This has a small false-trigger window. */
1073 if (test_bit(0, (void*)&dev->tbusy) &&
1074 (jiffies - dev->trans_start) > TX_TIMEOUT) {
1075 speedo_tx_timeout(dev);
1076 sp->last_reset = jiffies;
1078 if (sp->rx_mode < 0 ||
1079 (sp->rx_bug && jiffies - sp->last_rx_time > 2*HZ)) {
1080 /* We haven't received a packet in a Long Time. We might have been
1081 bitten by the receiver hang bug. This can be cleared by sending
1082 a set multicast list command. */
1083 set_rx_mode(dev);
1085 /* We must continue to monitor the media. */
1086 sp->timer.expires = RUN_AT(2*HZ); /* 2.0 sec. */
1087 add_timer(&sp->timer);
/* Debug helper: dump the Tx ring (marking dirty '*' and current '='
   entries), the Rx ring status words, and a selection of PHY registers. */
1090 static void speedo_show_state(struct net_device *dev)
1092 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1093 long ioaddr = dev->base_addr;
1094 int phy_num = sp->phy[0] & 0x1f;
1095 int i;
1097 /* Print a few items for debugging. */
1098 if (speedo_debug > 0) {
1099 int i; /* NOTE(review): shadows the outer 'i'; harmless but redundant. */
1100 printk(KERN_DEBUG "%s: Tx ring dump, Tx queue %d / %d:\n", dev->name,
1101 sp->cur_tx, sp->dirty_tx);
1102 for (i = 0; i < TX_RING_SIZE; i++)
1103 printk(KERN_DEBUG "%s: %c%c%d %8.8x.\n", dev->name,
1104 i == sp->dirty_tx % TX_RING_SIZE ? '*' : ' ',
1105 i == sp->cur_tx % TX_RING_SIZE ? '=' : ' ',
1106 i, sp->tx_ring[i].status);
1108 printk(KERN_DEBUG "%s:Printing Rx ring (next to receive into %d).\n",
1109 dev->name, sp->cur_rx);
1111 for (i = 0; i < RX_RING_SIZE; i++)
1112 printk(KERN_DEBUG " Rx ring entry %d %8.8x.\n",
1113 i, (int)sp->rx_ringp[i]->status);
/* Dumps PHY registers 0-5, then jumps to register 21 (one final
   iteration runs with i == 21, which also ends the i < 16 loop). */
1115 for (i = 0; i < 16; i++) {
1116 if (i == 6) i = 21;
1117 printk(KERN_DEBUG " PHY index %d register %d is %4.4x.\n",
1118 phy_num, i, mdio_read(ioaddr, phy_num, i));
1123 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
/* Each ring slot's RxFD header lives in the headroom of its own skb
   (skb->tail before skb_reserve), so descriptor and buffer share one
   allocation; descriptors are chained through rxf->link. */
1124 static void
1125 speedo_init_rx_ring(struct net_device *dev)
1127 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1128 struct RxFD *rxf, *last_rxf = NULL;
1129 int i;
1131 sp->cur_rx = 0;
1133 for (i = 0; i < RX_RING_SIZE; i++) {
1134 struct sk_buff *skb;
1135 skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
1136 sp->rx_skbuff[i] = skb;
1137 if (skb == NULL)
1138 break; /* OK. Just initially short of Rx bufs. */
1139 skb->dev = dev; /* Mark as being used by this device. */
1140 rxf = (struct RxFD *)skb->tail;
1141 sp->rx_ringp[i] = rxf;
1142 skb_reserve(skb, sizeof(struct RxFD));
1143 if (last_rxf)
1144 last_rxf->link = virt_to_le32bus(rxf);
1145 last_rxf = rxf;
1146 rxf->status = cpu_to_le32(0x00000001); /* '1' is flag value only. */
1147 rxf->link = 0; /* None yet. */
1148 /* This field unused by i82557, we use it as a consistency check. */
1149 #ifdef final_version
1150 rxf->rx_buf_addr = 0xffffffff;
1151 #else
1152 rxf->rx_buf_addr = virt_to_bus(skb->tail);
1153 #endif
1154 rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
1156 sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1157 /* Mark the last entry as end-of-list. */
/* NOTE(review): assumes at least one allocation above succeeded;
   last_rxf would be NULL if even the first dev_alloc_skb failed. */
1158 last_rxf->status = cpu_to_le32(0xC0000002); /* '2' is flag value only. */
1159 sp->last_rxf = last_rxf;
/* Recover from a transmit timeout.  If only the command unit stopped,
   just restart it at the first unreaped descriptor; otherwise do a full
   port reset, resume the chip, and kick the MII transceiver through a
   power-down/renegotiate/reset cycle. */
1162 static void speedo_tx_timeout(struct net_device *dev)
1164 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1165 long ioaddr = dev->base_addr;
1166 int status = inw(ioaddr + SCBStatus);
1168 /* Trigger a stats dump to give time before the reset. */
1169 speedo_get_stats(dev);
1171 printk(KERN_WARNING "%s: Transmit timed out: status %4.4x "
1172 " %4.4x at %d/%d command %8.8x.\n",
1173 dev->name, status, inw(ioaddr + SCBCmd),
1174 sp->dirty_tx, sp->cur_tx,
1175 sp->tx_ring[sp->dirty_tx % TX_RING_SIZE].status);
1176 speedo_show_state(dev);
/* Status decode: CU state in bits 6-7, RU state in bits 2-5. */
1177 if ((status & 0x00C0) != 0x0080
1178 && (status & 0x003C) == 0x0010) {
1179 /* Only the command unit has stopped. */
1180 printk(KERN_WARNING "%s: Trying to restart the transmitter...\n",
1181 dev->name);
1182 outl(virt_to_bus(&sp->tx_ring[sp->dirty_tx % TX_RING_SIZE]),
1183 ioaddr + SCBPointer);
1184 outw(CUStart, ioaddr + SCBCmd);
1185 } else {
1186 /* Reset the Tx and Rx units. */
1187 outl(PortReset, ioaddr + SCBPort);
1188 if (speedo_debug > 0)
1189 speedo_show_state(dev);
1190 udelay(10);
1191 speedo_resume(dev);
1193 /* Reset the MII transceiver, suggested by Fred Young @ scalable.com. */
1194 if ((sp->phy[0] & 0x8000) == 0) {
1195 int phy_addr = sp->phy[0] & 0x1f;
1196 mdio_write(ioaddr, phy_addr, 0, 0x0400);
1197 mdio_write(ioaddr, phy_addr, 1, 0x0000);
1198 mdio_write(ioaddr, phy_addr, 4, 0x0000);
1199 mdio_write(ioaddr, phy_addr, 0, 0x8000);
1200 #ifdef honor_default_port
1201 mdio_write(ioaddr, phy_addr, 0, mii_ctrl[dev->default_port & 7]);
1202 #endif
1204 sp->stats.tx_errors++;
1205 dev->trans_start = jiffies;
1206 return;
/* Queue one skb for transmission.  Builds a flexible-mode Tx command in
   the ring under sp->lock, then clears the suspend bit on the previous
   command and issues CUResume.  Returns 0 on success, 1 to ask the
   queue layer to retry (busy/timeout path). */
1209 static int
1210 speedo_start_xmit(struct sk_buff *skb, struct net_device *dev)
1212 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1213 long ioaddr = dev->base_addr;
1214 int entry;
1216 /* Block a timer-based transmit from overlapping. This could better be
1217 done with atomic_swap(1, dev->tbusy), but set_bit() works as well.
1218 If this ever occurs the queue layer is doing something evil! */
1219 if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
1220 int tickssofar = jiffies - dev->trans_start;
1221 if (tickssofar < TX_TIMEOUT - 2)
1222 return 1;
1223 if (tickssofar < TX_TIMEOUT) {
1224 /* Reap sent packets from the full Tx queue. */
1225 outw(SCBTriggerIntr, ioaddr + SCBCmd);
1226 return 1;
1228 speedo_tx_timeout(dev);
1229 return 1;
1232 /* Caution: the write order is important here, set the base address
1233 with the "ownership" bits last. */
1235 { /* Prevent interrupts from changing the Tx ring from underneath us. */
1236 unsigned long flags;
1238 spin_lock_irqsave(&sp->lock, flags);
1239 /* Calculate the Tx descriptor entry. */
1240 entry = sp->cur_tx++ % TX_RING_SIZE;
1242 sp->tx_skbuff[entry] = skb;
1243 /* Todo: be a little more clever about setting the interrupt bit. */
1244 sp->tx_ring[entry].status =
1245 cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
1246 sp->tx_ring[entry].link =
1247 virt_to_le32bus(&sp->tx_ring[sp->cur_tx % TX_RING_SIZE]);
1248 sp->tx_ring[entry].tx_desc_addr =
1249 virt_to_le32bus(&sp->tx_ring[entry].tx_buf_addr0);
1250 /* The data region is always in one buffer descriptor. */
1251 sp->tx_ring[entry].count = cpu_to_le32(sp->tx_threshold);
1252 sp->tx_ring[entry].tx_buf_addr0 = virt_to_le32bus(skb->data);
1253 sp->tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb->len);
1254 /* Todo: perhaps leave the interrupt bit set if the Tx queue is more
1255 than half full. Argument against: we should be receiving packets
1256 and scavenging the queue. Argument for: if so, it shouldn't
1257 matter. */
1258 /* Trigger the command unit resume. */
1260 struct descriptor *last_cmd = sp->last_cmd;
1261 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
1262 clear_suspend(last_cmd);
/* Ring considered full once TX_QUEUE_LIMIT commands are outstanding;
   otherwise re-open the queue for the next packet. */
1264 if (sp->cur_tx - sp->dirty_tx >= TX_QUEUE_LIMIT)
1265 sp->tx_full = 1;
1266 else
1267 clear_bit(0, (void*)&dev->tbusy);
1268 spin_unlock_irqrestore(&sp->lock, flags);
1270 wait_for_cmd_done(ioaddr + SCBCmd);
1271 outw(CUResume, ioaddr + SCBCmd);
1272 dev->trans_start = jiffies;
1274 return 0;
1277 /* The interrupt handler does all of the Rx thread work and cleans up
1278 after the Tx thread. */
/* Loops until no interrupt sources remain or max_interrupt_work events
   are handled: acknowledges the status bits, runs speedo_rx() on packet
   reception, restarts the receiver on resource exhaustion, and scavenges
   completed Tx descriptors (freeing skbs, adjusting the Tx threshold on
   underrun, reopening the queue when the ring drains). */
1279 static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1281 struct net_device *dev = (struct net_device *)dev_instance;
1282 struct speedo_private *sp;
1283 long ioaddr, boguscnt = max_interrupt_work;
1284 unsigned short status;
1286 #ifndef final_version
1287 if (dev == NULL) {
1288 printk(KERN_ERR "speedo_interrupt(): irq %d for unknown device.\n", irq);
1289 return;
1291 #endif
1293 ioaddr = dev->base_addr;
1294 sp = (struct speedo_private *)dev->priv;
1295 #ifndef final_version
1296 /* A lock to prevent simultaneous entry on SMP machines. */
1297 if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
1298 printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
1299 dev->name);
1300 sp->in_interrupt = 0; /* Avoid halting machine. */
1301 return;
1303 dev->interrupt = 1;
1304 #endif
1306 do {
1307 status = inw(ioaddr + SCBStatus);
1308 /* Acknowledge all of the current interrupt sources ASAP. */
1309 outw(status & 0xfc00, ioaddr + SCBStatus);
1311 if (speedo_debug > 4)
1312 printk(KERN_DEBUG "%s: interrupt status=%#4.4x.\n",
1313 dev->name, status);
1315 if ((status & 0xfc00) == 0)
1316 break;
1318 if (status & 0x4000) /* Packet received. */
1319 speedo_rx(dev);
1321 if (status & 0x1000) {
1322 if ((status & 0x003c) == 0x0028) /* No more Rx buffers. */
1323 outw(RxResumeNoResources, ioaddr + SCBCmd);
1324 else if ((status & 0x003c) == 0x0008) { /* No resources (why?!) */
1325 /* No idea of what went wrong. Restart the receiver. */
1326 outl(virt_to_bus(sp->rx_ringp[sp->cur_rx % RX_RING_SIZE]),
1327 ioaddr + SCBPointer);
1328 outw(RxStart, ioaddr + SCBCmd);
1330 sp->stats.rx_errors++;
1333 /* User interrupt, Command/Tx unit interrupt or CU not active. */
1334 if (status & 0xA400) {
1335 unsigned int dirty_tx;
1336 spin_lock(&sp->lock);
1338 dirty_tx = sp->dirty_tx;
1339 while (sp->cur_tx - dirty_tx > 0) {
1340 int entry = dirty_tx % TX_RING_SIZE;
1341 int status = le32_to_cpu(sp->tx_ring[entry].status);
1343 if (speedo_debug > 5)
1344 printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n",
1345 entry, status);
1346 if ((status & StatusComplete) == 0)
1347 break; /* It still hasn't been processed. */
/* Tx underrun: raise the start threshold (bounded) to give the
   FIFO more data before transmission begins. */
1348 if (status & TxUnderrun)
1349 if (sp->tx_threshold < 0x01e08000)
1350 sp->tx_threshold += 0x00040000;
1351 /* Free the original skb. */
1352 if (sp->tx_skbuff[entry]) {
1353 sp->stats.tx_packets++; /* Count only user packets. */
1354 #if LINUX_VERSION_CODE > 0x20127
1355 sp->stats.tx_bytes += sp->tx_skbuff[entry]->len;
1356 #endif
1357 dev_free_skb(sp->tx_skbuff[entry]);
1358 sp->tx_skbuff[entry] = 0;
1359 } else if ((status & 0x70000) == CmdNOp)
1360 sp->mc_setup_busy = 0; /* The multicast setup frame completed. */
1361 dirty_tx++;
1364 #ifndef final_version
1365 if (sp->cur_tx - dirty_tx > TX_RING_SIZE) {
1366 printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d,"
1367 " full=%d.\n",
1368 dirty_tx, sp->cur_tx, sp->tx_full);
1369 dirty_tx += TX_RING_SIZE;
1371 #endif
1373 sp->dirty_tx = dirty_tx;
1374 if (sp->tx_full
1375 && sp->cur_tx - dirty_tx < TX_QUEUE_LIMIT - 1) {
1376 /* The ring is no longer full, clear tbusy. */
1377 sp->tx_full = 0;
1378 clear_bit(0, (void*)&dev->tbusy);
1379 spin_unlock(&sp->lock);
1380 netif_wake_queue(dev);
1381 } else
1382 spin_unlock(&sp->lock);
1385 if (--boguscnt < 0) {
1386 printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
1387 dev->name, status);
1388 /* Clear all interrupt sources. */
/* NOTE(review): 32-bit write here spans both SCBStatus and SCBCmd,
   unlike the 16-bit ack above — confirm this is intended. */
1389 outl(0xfc00, ioaddr + SCBStatus);
1390 break;
1392 } while (1);
1394 if (speedo_debug > 3)
1395 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1396 dev->name, inw(ioaddr + SCBStatus));
1398 dev->interrupt = 0;
1399 clear_bit(0, (void*)&sp->in_interrupt);
1400 return;
/* Receive path: walk completed RxFDs, copy small packets into fresh
   skbs (rx_copybreak) or hand the ring skb straight up, then refill the
   ring and splice each recycled descriptor onto the hardware chain. */
1403 static int
1404 speedo_rx(struct net_device *dev)
1406 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1407 int entry = sp->cur_rx % RX_RING_SIZE;
1408 int status;
1409 int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;
1411 if (speedo_debug > 4)
1412 printk(KERN_DEBUG " In speedo_rx().\n");
1413 /* If we own the next entry, it's a new packet. Send it up. */
1414 while (sp->rx_ringp[entry] != NULL &&
1415 (status = le32_to_cpu(sp->rx_ringp[entry]->status)) & RxComplete) {
1416 int pkt_len = le32_to_cpu(sp->rx_ringp[entry]->count) & 0x3fff;
1418 if (--rx_work_limit < 0)
1419 break;
1420 if (speedo_debug > 4)
1421 printk(KERN_DEBUG " speedo_rx() status %8.8x len %d.\n", status,
1422 pkt_len);
1423 if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) {
1424 if (status & RxErrTooBig)
1425 printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, "
1426 "status %8.8x!\n", dev->name, status);
1427 else if ( ! (status & RxOK)) {
1428 /* There was a fatal error. This *should* be impossible. */
1429 sp->stats.rx_errors++;
1430 printk(KERN_ERR "%s: Anomalous event in speedo_rx(), "
1431 "status %8.8x.\n", dev->name, status);
1433 } else {
1434 struct sk_buff *skb;
1436 /* Check if the packet is long enough to just accept without
1437 copying to a properly sized skbuff. */
1438 if (pkt_len < rx_copybreak
1439 && (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
1440 skb->dev = dev;
1441 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1442 /* 'skb_put()' points to the start of sk_buff data area. */
1443 #if 1 || USE_IP_CSUM
1444 /* Packet is in one chunk -- we can copy + cksum. */
1445 eth_copy_and_sum(skb, sp->rx_skbuff[entry]->tail, pkt_len, 0);
1446 skb_put(skb, pkt_len);
1447 #else
1448 memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->tail,
1449 pkt_len);
1450 #endif
1451 } else {
1452 void *temp;
1453 /* Pass up the already-filled skbuff. */
1454 skb = sp->rx_skbuff[entry];
1455 if (skb == NULL) {
1456 printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
1457 dev->name);
1458 break;
/* The slot's skb and descriptor leave the ring here; the refill
   loop below replaces them. */
1460 sp->rx_skbuff[entry] = NULL;
1461 temp = skb_put(skb, pkt_len);
1462 #if !defined(final_version) && !defined(__powerpc__)
1463 if (bus_to_virt(sp->rx_ringp[entry]->rx_buf_addr) != temp)
1464 printk(KERN_ERR "%s: Rx consistency error -- the skbuff "
1465 "addresses do not match in speedo_rx: %p vs. %p "
1466 "/ %p.\n", dev->name,
1467 bus_to_virt(sp->rx_ringp[entry]->rx_buf_addr),
1468 skb->head, temp);
1469 #endif
1470 sp->rx_ringp[entry] = NULL;
1472 skb->protocol = eth_type_trans(skb, dev);
1473 netif_rx(skb);
1474 sp->stats.rx_packets++;
1475 #if LINUX_VERSION_CODE > 0x20127
1476 sp->stats.rx_bytes += pkt_len;
1477 #endif
1479 entry = (++sp->cur_rx) % RX_RING_SIZE;
1482 /* Refill the Rx ring buffers. */
1483 for (; sp->cur_rx - sp->dirty_rx > 0; sp->dirty_rx++) {
1484 struct RxFD *rxf;
1485 entry = sp->dirty_rx % RX_RING_SIZE;
1486 if (sp->rx_skbuff[entry] == NULL) {
1487 struct sk_buff *skb;
1488 /* Get a fresh skbuff to replace the consumed one. */
1489 skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
1490 sp->rx_skbuff[entry] = skb;
1491 if (skb == NULL) {
1492 sp->rx_ringp[entry] = NULL;
1493 break; /* Better luck next time! */
1495 rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
1496 skb->dev = dev;
1497 skb_reserve(skb, sizeof(struct RxFD));
1498 rxf->rx_buf_addr = virt_to_le32bus(skb->tail);
1499 } else {
1500 rxf = sp->rx_ringp[entry];
1502 rxf->status = cpu_to_le32(0xC0000001); /* '1' for driver use only. */
1503 rxf->link = 0; /* None yet. */
1504 rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
/* Chain the recycled descriptor after the old tail, then clear the
   old tail's end-of-list/suspend bits so the chip can advance. */
1505 sp->last_rxf->link = virt_to_le32bus(rxf);
1506 sp->last_rxf->status &= cpu_to_le32(~0xC0000000);
1507 sp->last_rxf = rxf;
1510 sp->last_rx_time = jiffies;
1511 return 0;
/* Bring the interface down: stop the timer, mask interrupts and abort
   the receiver, release the IRQ, free every ring skb and the multicast
   setup frame, and drop the chip to a low-power state. */
1514 static int
1515 speedo_close(struct net_device *dev)
1517 long ioaddr = dev->base_addr;
1518 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1519 int i;
1521 dev->start = 0;
1522 dev->tbusy = 1;
1524 if (speedo_debug > 1)
1525 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
1526 dev->name, inw(ioaddr + SCBStatus));
1528 /* Shut off the media monitoring timer. */
1529 del_timer(&sp->timer);
1531 /* Disable interrupts, and stop the chip's Rx process. */
1532 outw(SCBMaskAll, ioaddr + SCBCmd);
1533 outw(SCBMaskAll | RxAbort, ioaddr + SCBCmd);
1535 free_irq(dev->irq, dev);
1537 /* Free all the skbuffs in the Rx and Tx queues. */
1538 for (i = 0; i < RX_RING_SIZE; i++) {
1539 struct sk_buff *skb = sp->rx_skbuff[i];
1540 sp->rx_skbuff[i] = 0;
1541 /* Clear the Rx descriptors. */
1542 if (skb) {
1543 #if LINUX_VERSION_CODE < 0x20100
1544 skb->free = 1;
1545 #endif
1546 dev_free_skb(skb);
1550 for (i = 0; i < TX_RING_SIZE; i++) {
1551 struct sk_buff *skb = sp->tx_skbuff[i];
1552 sp->tx_skbuff[i] = 0;
1553 /* Clear the Tx descriptors. */
1554 if (skb)
1555 dev_free_skb(skb);
1557 if (sp->mc_setup_frm) {
1558 kfree(sp->mc_setup_frm);
1559 sp->mc_setup_frm_len = 0;
1562 /* Print a few items for debugging. */
1563 if (speedo_debug > 3)
1564 speedo_show_state(dev);
1566 #if defined(HAS_PCI_NETIF)
1567 /* Alt: acpi_set_pwr_state(pci_bus, pci_devfn, sp->acpi_pwr); */
1568 acpi_set_pwr_state(sp->pci_bus, sp->pci_devfn, ACPI_D2);
1569 #endif
1570 MOD_DEC_USE_COUNT;
1572 return 0;
1575 /* The Speedo-3 has an especially awkward and unusable method of getting
1576 statistics out of the chip. It takes an unpredictable length of time
1577 for the dump-stats command to complete. To avoid a busy-wait loop we
1578 update the stats with the previous dump results, and then trigger a
1579 new dump.
1581 These problems are mitigated by the current /proc implementation, which
1582 calls this routine first to judge the output length, and then to emit the
1583 output.
1585 Oh, and incoming frames are dropped while executing dump-stats! */
/* Fold the previously dumped hardware statistics block into sp->stats
   (only if the chip's done-marker shows the dump finished), then kick
   off a fresh CUDumpStats so the next call picks up new numbers. */
1587 static struct enet_statistics *
1588 speedo_get_stats(struct net_device *dev)
1590 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1591 long ioaddr = dev->base_addr;
1593 /* Update only if the previous dump finished. */
1594 if (sp->lstats.done_marker == le32_to_cpu(0xA007)) { /* 0xA007 = dump-complete magic. */
1595 sp->stats.tx_aborted_errors += le32_to_cpu(sp->lstats.tx_coll16_errs);
1596 sp->stats.tx_window_errors += le32_to_cpu(sp->lstats.tx_late_colls);
1597 sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats.tx_underruns);
1598 sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats.tx_lost_carrier);
1599 /*sp->stats.tx_deferred += le32_to_cpu(sp->lstats.tx_deferred);*/
1600 sp->stats.collisions += le32_to_cpu(sp->lstats.tx_total_colls);
1601 sp->stats.rx_crc_errors += le32_to_cpu(sp->lstats.rx_crc_errs);
1602 sp->stats.rx_frame_errors += le32_to_cpu(sp->lstats.rx_align_errs);
1603 sp->stats.rx_over_errors += le32_to_cpu(sp->lstats.rx_resource_errs);
1604 sp->stats.rx_fifo_errors += le32_to_cpu(sp->lstats.rx_overrun_errs);
1605 sp->stats.rx_length_errors += le32_to_cpu(sp->lstats.rx_runt_errs);
1606 sp->lstats.done_marker = 0x0000;
1607 if (dev->start) {
1608 wait_for_cmd_done(ioaddr + SCBCmd);
1609 outw(CUDumpStats, ioaddr + SCBCmd);
1612 return &sp->stats;
/* Private ioctls implementing the de-facto MII interface: get the PHY
   address, read a PHY register, write a PHY register (root only).  The
   chip is briefly raised to full power around MDIO access when ACPI
   support is compiled in. */
1615 static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1617 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1618 long ioaddr = dev->base_addr;
1619 u16 *data = (u16 *)&rq->ifr_data;
1620 int phy = sp->phy[0] & 0x1f;
1621 #if defined(HAS_PCI_NETIF)
1622 int saved_acpi;
1623 #endif
1625 switch(cmd) {
1626 case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
1627 data[0] = phy;
/* Fall through: presumably "get PHY address" also performs the
   register read, matching the common MII-ioctl convention. */
1628 case SIOCDEVPRIVATE+1: /* Read the specified MII register. */
1629 #if defined(HAS_PCI_NETIF)
1630 saved_acpi = acpi_set_pwr_state(sp->pci_bus, sp->pci_devfn, ACPI_D0);
1631 data[3] = mdio_read(ioaddr, data[0], data[1]);
1632 acpi_set_pwr_state(sp->pci_bus, sp->pci_devfn, saved_acpi);
1633 #else
1634 data[3] = mdio_read(ioaddr, data[0], data[1]);
1635 #endif
1636 return 0;
1637 case SIOCDEVPRIVATE+2: /* Write the specified MII register */
1638 if (!capable(CAP_NET_ADMIN))
1639 return -EPERM;
1640 #if defined(HAS_PCI_NETIF)
1641 saved_acpi = acpi_set_pwr_state(sp->pci_bus, sp->pci_devfn, ACPI_D0);
1642 mdio_write(ioaddr, data[0], data[1], data[2]);
1643 acpi_set_pwr_state(sp->pci_bus, sp->pci_devfn, saved_acpi);
1644 #else
1645 mdio_write(ioaddr, data[0], data[1], data[2]);
1646 #endif
1647 return 0;
1648 default:
1649 return -EOPNOTSUPP;
1653 /* Set or clear the multicast filter for this adaptor.
1654 This is very ugly with Intel chips -- we usually have to execute an
1655 entire configuration command, plus process a multicast command.
1656 This is complicated. We must put a large configuration command and
1657 an arbitrarily-sized multicast command in the transmit list.
1658 To minimize the disruption -- the previous command might have already
1659 loaded the link -- we convert the current command block, normally a Tx
1660 command, into a no-op and link it to the new command. */
/* Program the receive filter.  Three stages, each appended to the Tx
   command ring: (1) a CmdConfigure frame whenever the mode byte changes,
   (2) an in-ring CmdMulticastList for 0-3 addresses, or (3) a separately
   allocated full setup frame for longer lists, linked in via a NOp. */
1662 static void set_rx_mode(struct net_device *dev)
1664 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1665 long ioaddr = dev->base_addr;
1666 struct descriptor *last_cmd;
1667 char new_rx_mode;
1668 unsigned long flags;
1669 int entry, i;
/* Mode encoding: bit 1 = promiscuous, bit 0 = all-multicast. */
1671 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1672 new_rx_mode = 3;
1673 } else if ((dev->flags & IFF_ALLMULTI) ||
1674 dev->mc_count > multicast_filter_limit) {
1675 new_rx_mode = 1;
1676 } else
1677 new_rx_mode = 0;
1679 if (sp->cur_tx - sp->dirty_tx >= TX_RING_SIZE - 1) {
1680 /* The Tx ring is full -- don't add anything! Presumably the new mode
1681 is in config_cmd_data and will be added anyway. */
1682 sp->rx_mode = -1;
1683 return;
1686 if (new_rx_mode != sp->rx_mode) {
1687 u8 *config_cmd_data;
1689 spin_lock_irqsave(&sp->lock, flags);
1690 entry = sp->cur_tx++ % TX_RING_SIZE;
1691 last_cmd = sp->last_cmd;
1692 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
1694 sp->tx_skbuff[entry] = 0; /* Redundant. */
1695 sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdConfigure);
1696 sp->tx_ring[entry].link =
1697 virt_to_le32bus(&sp->tx_ring[(entry + 1) % TX_RING_SIZE]);
1698 config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr;
1699 /* Construct a full CmdConfig frame. */
1700 memcpy(config_cmd_data, i82558_config_cmd, sizeof(i82558_config_cmd));
1701 config_cmd_data[1] = (txfifo << 4) | rxfifo;
1702 config_cmd_data[4] = rxdmacount;
1703 config_cmd_data[5] = txdmacount + 0x80;
1704 config_cmd_data[15] |= (new_rx_mode & 2) ? 1 : 0;
1705 config_cmd_data[19] = sp->flow_ctrl ? 0xBD : 0x80;
1706 config_cmd_data[19] |= sp->full_duplex ? 0x40 : 0;
1707 config_cmd_data[21] = (new_rx_mode & 1) ? 0x0D : 0x05;
1708 if (sp->phy[0] & 0x8000) { /* Use the AUI port instead. */
1709 config_cmd_data[15] |= 0x80;
1710 config_cmd_data[8] = 0;
1712 /* Trigger the command unit resume. */
1713 wait_for_cmd_done(ioaddr + SCBCmd);
1714 clear_suspend(last_cmd);
1715 outw(CUResume, ioaddr + SCBCmd);
1716 spin_unlock_irqrestore(&sp->lock, flags);
1719 if (new_rx_mode == 0 && dev->mc_count < 4) {
1720 /* The simple case of 0-3 multicast list entries occurs often, and
1721 fits within one tx_ring[] entry. */
1722 struct dev_mc_list *mclist;
1723 u16 *setup_params, *eaddrs;
1725 spin_lock_irqsave(&sp->lock, flags);
1726 entry = sp->cur_tx++ % TX_RING_SIZE;
1727 last_cmd = sp->last_cmd;
1728 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
1730 sp->tx_skbuff[entry] = 0;
1731 sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdMulticastList);
1732 sp->tx_ring[entry].link =
1733 virt_to_le32bus(&sp->tx_ring[(entry + 1) % TX_RING_SIZE]);
1734 sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */
1735 setup_params = (u16 *)&sp->tx_ring[entry].tx_desc_addr;
1736 *setup_params++ = cpu_to_le16(dev->mc_count*6);
1737 /* Fill in the multicast addresses. */
1738 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
1739 i++, mclist = mclist->next) {
1740 eaddrs = (u16 *)mclist->dmi_addr;
1741 *setup_params++ = *eaddrs++;
1742 *setup_params++ = *eaddrs++;
1743 *setup_params++ = *eaddrs++;
1746 wait_for_cmd_done(ioaddr + SCBCmd);
1747 clear_suspend(last_cmd);
1748 /* Immediately trigger the command unit resume. */
1749 outw(CUResume, ioaddr + SCBCmd);
1750 spin_unlock_irqrestore(&sp->lock, flags);
1751 } else if (new_rx_mode == 0) {
1752 struct dev_mc_list *mclist;
1753 u16 *setup_params, *eaddrs;
1754 struct descriptor *mc_setup_frm = sp->mc_setup_frm;
1755 int i;
/* Grow (or first-allocate) the off-ring setup frame if the current
   one cannot hold the whole list. */
1757 if (sp->mc_setup_frm_len < 10 + dev->mc_count*6
1758 || sp->mc_setup_frm == NULL) {
1759 /* Allocate a full setup frame, 10bytes + <max addrs>. */
1760 if (sp->mc_setup_frm)
1761 kfree(sp->mc_setup_frm);
1762 sp->mc_setup_busy = 0;
1763 sp->mc_setup_frm_len = 10 + multicast_filter_limit*6;
1764 sp->mc_setup_frm = kmalloc(sp->mc_setup_frm_len, GFP_ATOMIC);
1765 if (sp->mc_setup_frm == NULL) {
1766 printk(KERN_ERR "%s: Failed to allocate a setup frame.\n",
1767 dev->name);
1768 sp->rx_mode = -1; /* We failed, try again. */
1769 return;
1772 /* If we are busy, someone might be quickly adding to the MC list.
1773 Try again later when the list updates stop. */
1774 if (sp->mc_setup_busy) {
1775 sp->rx_mode = -1;
1776 return;
1778 mc_setup_frm = sp->mc_setup_frm;
1779 /* Fill the setup frame. */
1780 if (speedo_debug > 1)
1781 printk(KERN_DEBUG "%s: Constructing a setup frame at %p, "
1782 "%d bytes.\n",
1783 dev->name, sp->mc_setup_frm, sp->mc_setup_frm_len);
1784 mc_setup_frm->cmd_status =
1785 cpu_to_le32(CmdSuspend | CmdIntr | CmdMulticastList);
1786 /* Link set below. */
1787 setup_params = (u16 *)&mc_setup_frm->params;
1788 *setup_params++ = cpu_to_le16(dev->mc_count*6);
1789 /* Fill in the multicast addresses. */
1790 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
1791 i++, mclist = mclist->next) {
1792 eaddrs = (u16 *)mclist->dmi_addr;
1793 *setup_params++ = *eaddrs++;
1794 *setup_params++ = *eaddrs++;
1795 *setup_params++ = *eaddrs++;
1798 /* Disable interrupts while playing with the Tx Cmd list. */
1799 spin_lock_irqsave(&sp->lock, flags);
1800 entry = sp->cur_tx++ % TX_RING_SIZE;
1801 last_cmd = sp->last_cmd;
1802 sp->last_cmd = mc_setup_frm;
1803 sp->mc_setup_busy++;
1805 /* Change the command to a NoOp, pointing to the CmdMulti command. */
1806 sp->tx_skbuff[entry] = 0;
1807 sp->tx_ring[entry].status = cpu_to_le32(CmdNOp);
1808 sp->tx_ring[entry].link = virt_to_le32bus(mc_setup_frm);
1810 /* Set the link in the setup frame. */
1811 mc_setup_frm->link =
1812 virt_to_le32bus(&(sp->tx_ring[(entry+1) % TX_RING_SIZE]));
1814 wait_for_cmd_done(ioaddr + SCBCmd);
1815 clear_suspend(last_cmd);
1816 /* Immediately trigger the command unit resume. */
1817 outw(CUResume, ioaddr + SCBCmd);
1818 spin_unlock_irqrestore(&sp->lock, flags);
1819 if (speedo_debug > 5)
1820 printk(" CmdMCSetup frame length %d in entry %d.\n",
1821 dev->mc_count, entry);
1824 sp->rx_mode = new_rx_mode;
1827 #ifdef MODULE
1829 int init_module(void)
1831 int cards_found;
1833 if (debug >= 0)
1834 speedo_debug = debug;
1835 /* Always emit the version message. */
1836 if (speedo_debug)
1837 printk(KERN_INFO "%s", version);
1839 #if defined(HAS_PCI_NETIF)
1840 cards_found = netif_pci_probe(pci_tbl, NULL);
1841 if (cards_found < 0)
1842 printk(KERN_INFO "eepro100: No cards found, driver not installed.\n");
1843 return cards_found;
1844 #else
1845 cards_found = eepro100_init(NULL);
1846 if (cards_found <= 0) {
1847 printk(KERN_INFO "eepro100: No cards found, driver not installed.\n");
1848 return -ENODEV;
1850 #endif
1851 return 0;
1854 void cleanup_module(void)
1856 struct net_device *next_dev;
1858 /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
1859 while (root_speedo_dev) {
1860 struct speedo_private *sp = (void *)root_speedo_dev->priv;
1861 unregister_netdev(root_speedo_dev);
1862 #ifdef USE_IO
1863 release_region(root_speedo_dev->base_addr, SPEEDO3_TOTAL_SIZE);
1864 #else
1865 iounmap((char *)root_speedo_dev->base_addr);
1866 #endif
1867 #if defined(HAS_PCI_NETIF)
1868 acpi_set_pwr_state(sp->pci_bus, sp->pci_devfn, sp->acpi_pwr);
1869 #endif
1870 next_dev = sp->next_module;
1871 if (sp->priv_addr)
1872 kfree(sp->priv_addr);
1873 kfree(root_speedo_dev);
1874 root_speedo_dev = next_dev;
1878 #else /* not MODULE */
1880 int eepro100_probe(struct net_device *dev)
1882 int cards_found = 0;
1884 cards_found = eepro100_init(dev);
1886 /* Only emit the version if the driver is being used. */
1887 if (speedo_debug > 0 && cards_found)
1888 printk(version);
1890 return cards_found ? 0 : -ENODEV;
1892 #endif /* MODULE */
1895 * Local variables:
1896 * compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS` `[ -f ./pci-netif.h ] && echo -DHAS_PCI_NETIF`"
1897 * SMP-compile-command: "gcc -D__SMP__ -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
1898 * simple-compile-command: "gcc -DMODULE -D__KERNEL__ -O6 -c eepro100.c"
1899 * c-indent-level: 4
1900 * c-basic-offset: 4
1901 * tab-width: 4
1902 * End: