/* drivers/net/eepro100.c: An Intel i82557-559 Ethernet driver for Linux. */
/*
	Written 1996-1999 by Donald Becker.

	The driver also contains updates by different kernel developers
	(see incomplete list below).
	Current maintainer is Andrey V. Savochkin <saw@saw.sw.com.sg>.
	Please use this email address and linux-kernel mailing list for bug reports.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	This driver is for the Intel EtherExpress Pro100 (Speedo3) design.
	It should work with all i82557/558/559 boards.

	Version history:
	1998 Apr - 2000 Feb  Andrey V. Savochkin <saw@saw.sw.com.sg>
		Serious fixes for multicast filter list setting, TX timeout routine;
		RX ring refilling logic; other stuff
	2000 Feb  Jeff Garzik <jgarzik@pobox.com>
		Convert to new PCI driver interface
	2000 Mar 24  Dragan Stancevic <visitor@valinux.com>
		Disabled FC and ER, to avoid lockups when we get FCP interrupts.
	2000 Jul 17  Goutham Rao <goutham.rao@intel.com>
		PCI DMA API fixes, adding pci_dma_sync_single calls where necessary
	2000 Aug 31  David Mosberger <davidm@hpl.hp.com>
		rx_align support: enables rx DMA without causing unaligned accesses.
*/

static const char *version =
"eepro100.c:v1.09j-t 9/29/99 Donald Becker http://www.scyld.com/network/eepro100.html\n"
"eepro100.c: $Revision: 1.36 $ 2000/11/17 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n";
/* A few user-configurable values that apply to all boards.
   First set is undocumented and spelled per Intel recommendations. */

static int congenb /* = 0 */;	/* Enable congestion control in the DP83840. */
static int txfifo = 8;		/* Tx FIFO threshold in 4 byte units, 0-15 */
static int rxfifo = 8;		/* Rx FIFO threshold, default 32 bytes. */
/* Tx/Rx DMA burst length, 0-127, 0 == no preemption, tx==128 -> disabled. */
static int txdmacount = 128;
static int rxdmacount /* = 0 */;

#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) || defined(__mips__) || \
	defined(__arm__)
/* align rx buffers to 2 bytes so that IP header is aligned */
# define rx_align(skb)		skb_reserve((skb), 2)
# define RxFD_ALIGNMENT		__attribute__ ((aligned (2), packed))
#else
# define rx_align(skb)
# define RxFD_ALIGNMENT
#endif
/* Set the copy breakpoint for the copy-only-tiny-buffer Rx method.
   Lower values use more memory, but are faster. */
static int rx_copybreak = 200;

/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast) */
static int multicast_filter_limit = 64;

/* 'options' is used to pass a transceiver override or full-duplex flag
   e.g. "options=16" for FD, "options=32" for 100mbps-only. */
static int full_duplex[] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1};
/* A few values that may be tweaked. */
/* The ring sizes should be a power of two for efficiency. */
#define TX_RING_SIZE	64
#define RX_RING_SIZE	64
/* How many slots multicast filter setup may take.
   Do not decrease without changing set_rx_mode() implementation. */
#define TX_MULTICAST_SIZE   2
#define TX_MULTICAST_RESERV (TX_MULTICAST_SIZE*2)
/* Actual number of TX packets queued, must be
   <= TX_RING_SIZE-TX_MULTICAST_RESERV. */
#define TX_QUEUE_LIMIT  (TX_RING_SIZE-TX_MULTICAST_RESERV)
/* Hysteresis marking queue as no longer full. */
#define TX_QUEUE_UNFULL (TX_QUEUE_LIMIT-4)
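/* Illustrative note (not in the original sources): with TX_RING_SIZE = 64
   and TX_MULTICAST_RESERV = 4, at most 60 packets are queued and the queue
   is re-woken only once it drains below 56, so the queue-full flag toggles
   with a small hysteresis instead of on every freed slot. */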
/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT		(2*HZ)
/* Size of a pre-allocated Rx buffer: <Ethernet MTU> + slack.*/
#define PKT_BUF_SZ		1536
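/* Illustrative note: a maximal Ethernet frame is 1518 bytes (1500-byte MTU
   + 14-byte header + 4-byte FCS); 1536 (0x600) covers that plus slack. */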
#include <linux/config.h>
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/delay.h>

#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/irq.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
/* enable PIO instead of MMIO, if CONFIG_EEPRO100_PIO is selected */
#ifdef CONFIG_EEPRO100_PIO
#define USE_IO 1
#endif

static int debug = -1;
#define DEBUG_DEFAULT		(NETIF_MSG_DRV		| \
				 NETIF_MSG_HW		| \
				 NETIF_MSG_RX_ERR	| \
				 NETIF_MSG_TX_ERR)
#define DEBUG			((debug >= 0) ? (1<<debug)-1 : DEBUG_DEFAULT)
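/* Illustrative note: a module parameter of debug=3 yields (1<<3)-1 = 0x7,
   i.e. NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK; debug < 0 keeps
   the DEBUG_DEFAULT mask above. */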
MODULE_AUTHOR("Maintainer: Andrey V. Savochkin <saw@saw.sw.com.sg>");
MODULE_DESCRIPTION("Intel i82557/i82558/i82559 PCI EtherExpressPro driver");
MODULE_LICENSE("GPL");
MODULE_PARM(debug, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(congenb, "i");
MODULE_PARM(txfifo, "i");
MODULE_PARM(rxfifo, "i");
MODULE_PARM(txdmacount, "i");
MODULE_PARM(rxdmacount, "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(multicast_filter_limit, "i");
MODULE_PARM_DESC(debug, "debug level (0-6)");
MODULE_PARM_DESC(options, "Bits 0-3: transceiver type, bit 4: full duplex, bit 5: 100Mbps");
MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)");
MODULE_PARM_DESC(congenb, "Enable congestion control (1)");
MODULE_PARM_DESC(txfifo, "Tx FIFO threshold in 4 byte units, (0-15)");
MODULE_PARM_DESC(rxfifo, "Rx FIFO threshold in 4 byte units, (0-15)");
MODULE_PARM_DESC(txdmacount, "Tx DMA burst length; 128 - disable (0-128)");
MODULE_PARM_DESC(rxdmacount, "Rx DMA burst length; 128 - disable (0-128)");
MODULE_PARM_DESC(rx_copybreak, "copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(max_interrupt_work, "maximum events handled per interrupt");
MODULE_PARM_DESC(multicast_filter_limit, "maximum number of filtered multicast addresses");
#define RUN_AT(x) (jiffies + (x))

/* ACPI power states don't universally work (yet) */
#ifndef CONFIG_PM
#undef pci_set_power_state
#define pci_set_power_state null_set_power_state
static inline int null_set_power_state(struct pci_dev *dev, int state)
{
	return 0;
}
#endif /* CONFIG_PM */

#define netdevice_start(dev)
#define netdevice_stop(dev)
#define netif_set_tx_timeout(dev, tf, tm) \
	do { \
		(dev)->tx_timeout = (tf); \
		(dev)->watchdog_timeo = (tm); \
	} while(0)
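/* Illustrative note: the macro above is used once during probe, as
   netif_set_tx_timeout(dev, &speedo_tx_timeout, TX_TIMEOUT), wiring the
   netdev watchdog to this driver's Tx-timeout handler. */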
/*
				Theory of Operation

I. Board Compatibility

This device driver is designed for the Intel i82557 "Speedo3" chip, Intel's
single-chip fast Ethernet controller for PCI, as used on the Intel
EtherExpress Pro 100 adapter.

II. Board-specific settings

PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board.  The system BIOS should be set to assign the
PCI INTA signal to an otherwise unused system IRQ line.  While it's
possible to share PCI interrupt lines, it negatively impacts performance and
only recent kernels support it.

III. Driver operation

IIIA. General
The Speedo3 is very similar to other Intel network chips, that is to say
"apparently designed on a different planet".  This chip retains the complex
Rx and Tx descriptors and multiple buffer pointers as previous chips, but
also has simplified Tx and Rx buffer modes.  This driver uses the "flexible"
Tx mode, but in a simplified lower-overhead manner: it associates only a
single buffer descriptor with each frame descriptor.

Despite the extra space overhead in each receive skbuff, the driver must use
the simplified Rx buffer mode to assure that only a single data buffer is
associated with each RxFD.  The driver implements this by reserving space
for the Rx descriptor at the head of each Rx skbuff.

The Speedo-3 has receive and command unit base addresses that are added to
almost all descriptor pointers.  The driver sets these to zero, so that all
pointer fields are absolute addresses.

The System Control Block (SCB) of some previous Intel chips exists on the
chip in both PCI I/O and memory space.  This driver uses the I/O space
registers, but might switch to memory mapped mode to better support non-x86
processors.

IIIB. Transmit structure

The driver must use the complex Tx command+descriptor mode in order to
have an indirect pointer to the skbuff data section.  Each Tx command block
(TxCB) is associated with two immediately appended Tx Buffer Descriptors
(TxBDs).  A fixed ring of these TxCB+TxBD pairs is kept as part of the
speedo_private data structure for each adapter instance.

The newer i82558 explicitly supports this structure, and can read the two
TxBDs in the same PCI burst as the TxCB.

This ring structure is used for all normal transmit packets, but the
transmit packet descriptors aren't long enough for most non-Tx commands such
as CmdConfigure.  This is complicated by the possibility that the chip has
already loaded the link address in the previous descriptor.  So for these
commands we convert the next free descriptor on the ring to a NoOp, and point
that descriptor's link to the complex command.

An additional complexity of these non-transmit commands is that they may be
added asynchronously to the normal transmit queue, so we disable interrupts
whenever the Tx descriptor ring is manipulated.

A notable aspect of these special configure commands is that they do
work with the normal Tx ring entry scavenge method.  The Tx ring scavenge
is done at interrupt time using the 'dirty_tx' index, and checking for the
command-complete bit.  The setup frames may have the NoOp command on the
Tx ring marked as complete while the setup command itself has not yet
completed, but this is not a problem.  The tx_ring entry can still be
safely reused, as the tx_skbuff[] entry is always empty for config_cmd and
mc_setup frames.

Commands may have bits set e.g. CmdSuspend in the command word to either
suspend or stop the transmit/command unit.  This driver always flags the last
command with CmdSuspend, erases the CmdSuspend in the previous command, and
then issues a CU_RESUME.
Note: Watch out for the potential race condition here: imagine
	erasing the previous suspend
		the chip processes the previous command
		the chip processes the final command, and suspends
	doing the CU_RESUME
		the chip processes the next-yet-valid post-final-command.
So blindly sending a CU_RESUME is only safe if we do it immediately after
erasing the previous CmdSuspend, without the possibility of an intervening
delay.  Thus the resume command is always within the interrupts-disabled
region.  This is a timing dependence, but handling this condition in a
timing-independent way would considerably complicate the code.
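
For illustration, this mirrors what speedo_start_xmit() below actually does,
entirely inside the sp->lock interrupts-disabled region:

	wait_for_cmd_done(dev);
	clear_suspend(sp->last_cmd);
	outb(CUResume, ioaddr + SCBCmd);
	sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];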
Note: In previous generation Intel chips, restarting the command unit was a
notoriously slow process.  This is presumably no longer true.

IIIC. Receive structure

Because of the bus-master support on the Speedo3 this driver uses the new
SKBUFF_RX_COPYBREAK scheme, rather than a fixed intermediate receive buffer.
This scheme allocates full-sized skbuffs as receive buffers.  The value
SKBUFF_RX_COPYBREAK is used as the copying breakpoint: it is chosen to
trade off the memory wasted by passing the full-sized skbuff to the queue
layer for all frames vs. the cost of copying a frame to a correctly-sized
skbuff.

For small frames the copying cost is negligible (esp. considering that we
are pre-loading the cache with immediately useful header information), so we
allocate a new, minimally-sized skbuff.  For large frames the copying cost
is non-trivial, and the larger copy might flush the cache of useful data, so
we pass up the skbuff the packet was received into.
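
A minimal sketch of that decision, assuming the usual 2-byte reserve for IP
header alignment (the receive path of this driver follows the same pattern):

	if (pkt_len < rx_copybreak
	    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);
		... copy pkt_len bytes into the small skb and pass it up,
		    leaving the original buffer in the Rx ring ...
	} else {
		... unmap the full-sized receive skbuff and pass it up ...
	}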
IV. Notes

Thanks to Steve Williams of Intel for arranging the non-disclosure agreement
that stated that I could disclose the information.  But I still resent
having to sign an Intel NDA when I'm helping Intel sell their own product!
*/
static int speedo_found1(struct pci_dev *pdev, long ioaddr, int fnd_cnt, int acpi_idle_state);

enum pci_flags_bit {
	PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
	PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
};
static inline unsigned int io_inw(unsigned long port)
{
	return inw(port);
}
static inline void io_outw(unsigned int val, unsigned long port)
{
	outw(val, port);
}

#ifndef USE_IO
/* Currently alpha headers define in/out macros.
   Undefine them.  2000/03/30  SAW */
#undef inb
#undef inw
#undef inl
#undef outb
#undef outw
#undef outl
#define inb readb
#define inw readw
#define inl readl
#define outb writeb
#define outw writew
#define outl writel
#endif
/* Offsets to the various registers.
   All accesses need not be longword aligned. */
enum speedo_offsets {
	SCBStatus = 0, SCBCmd = 2,	/* Rx/Command Unit command and status. */
	SCBIntmask = 3,
	SCBPointer = 4,			/* General purpose pointer. */
	SCBPort = 8,			/* Misc. commands and operands. */
	SCBflash = 12, SCBeeprom = 14,	/* EEPROM and flash memory control. */
	SCBCtrlMDI = 16,		/* MDI interface control. */
	SCBEarlyRx = 20,		/* Early receive byte count. */
};

/* Commands that can be put in a command list entry. */
enum commands {
	CmdNOp = 0, CmdIASetup = 0x10000, CmdConfigure = 0x20000,
	CmdMulticastList = 0x30000, CmdTx = 0x40000, CmdTDR = 0x50000,
	CmdDump = 0x60000, CmdDiagnose = 0x70000,
	CmdSuspend = 0x40000000,	/* Suspend after completion. */
	CmdIntr = 0x20000000,		/* Interrupt after completion. */
	CmdTxFlex = 0x00080000,		/* Use "Flexible mode" for CmdTx command. */
};
/* Clear CmdSuspend (1<<30) avoiding interference with the card access to the
   status bits.  Previous driver versions used separate 16 bit fields for
   commands and statuses.  --SAW
 */
#if defined(__alpha__)
# define clear_suspend(cmd)	clear_bit(30, &(cmd)->cmd_status);
#else
# if defined(__LITTLE_ENDIAN)
#  define clear_suspend(cmd)	((__u16 *)&(cmd)->cmd_status)[1] &= ~0x4000
# elif defined(__BIG_ENDIAN)
#  define clear_suspend(cmd)	((__u16 *)&(cmd)->cmd_status)[1] &= ~0x0040
# else
#  error Unsupported byteorder
# endif
#endif
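/* Illustrative note: cmd_status is kept little-endian for the chip, and
   CmdSuspend is bit 30 of that 32-bit word, i.e. bit 14 of its upper 16-bit
   half.  A little-endian CPU reads that half directly through halfword [1],
   giving mask ~0x4000; a big-endian CPU reads the same bytes swapped, which
   moves the bit into the low byte, hence ~0x0040. */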
enum SCBCmdBits {
	SCBMaskCmdDone=0x8000, SCBMaskRxDone=0x4000, SCBMaskCmdIdle=0x2000,
	SCBMaskRxSuspend=0x1000, SCBMaskEarlyRx=0x0800, SCBMaskFlowCtl=0x0400,
	SCBTriggerIntr=0x0200, SCBMaskAll=0x0100,
	/* The rest are Rx and Tx commands. */
	CUStart=0x0010, CUResume=0x0020, CUStatsAddr=0x0040, CUShowStats=0x0050,
	CUCmdBase=0x0060,	/* CU Base address (set to zero) . */
	CUDumpStats=0x0070,	/* Dump then reset stats counters. */
	RxStart=0x0001, RxResume=0x0002, RxAbort=0x0004, RxAddrLoad=0x0006,
	RxResumeNoResources=0x0007,
};

enum SCBPort_cmds {
	PortReset=0, PortSelfTest=1, PortPartialReset=2, PortDump=3,
};
/* The Speedo3 Rx and Tx frame/buffer descriptors. */
struct descriptor {			/* A generic descriptor. */
	volatile s32 cmd_status;	/* All command and status fields. */
	u32 link;			/* struct descriptor * */
	unsigned char params[0];
};

/* The Speedo3 Rx and Tx buffer descriptors. */
struct RxFD {				/* Receive frame descriptor. */
	volatile s32 status;
	u32 link;			/* struct RxFD * */
	u32 rx_buf_addr;		/* void * */
	u32 count;
} RxFD_ALIGNMENT;
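/* Illustrative note: in the simplified Rx mode this RxFD sits at the head of
   each receive skbuff.  The 'count' word is split into 16-bit halves: the
   driver writes the buffer size into the upper half (see
   speedo_init_rx_ring(): count = cpu_to_le32(PKT_BUF_SZ << 16)) and the
   chip reports the actual byte count in the lower half. */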
/* Selected elements of the Tx/RxFD.status word. */
enum RxFD_bits {
	RxComplete=0x8000, RxOK=0x2000,
	RxErrCRC=0x0800, RxErrAlign=0x0400, RxErrTooBig=0x0200, RxErrSymbol=0x0010,
	RxEth2Type=0x0020, RxNoMatch=0x0004, RxNoIAMatch=0x0002,
	TxUnderrun=0x1000, StatusComplete=0x8000,
};

#define CONFIG_DATA_SIZE 22
struct TxFD {				/* Transmit frame descriptor set. */
	s32 status;
	u32 link;			/* void * */
	u32 tx_desc_addr;		/* Always points to the tx_buf_addr element. */
	s32 count;			/* # of TBD (=1), Tx start thresh., etc. */
	/* This constitutes two "TBD" entries -- we only use one. */
#define TX_DESCR_BUF_OFFSET 16
	u32 tx_buf_addr0;		/* void *, frame to be transmitted. */
	s32 tx_buf_size0;		/* Length of Tx frame. */
	u32 tx_buf_addr1;		/* void *, frame to be transmitted. */
	s32 tx_buf_size1;		/* Length of Tx frame. */
	/* the structure must have space for at least CONFIG_DATA_SIZE starting
	 * from tx_desc_addr field */
};
/* Multicast filter setting block.  --SAW */
struct speedo_mc_block {
	struct speedo_mc_block *next;
	unsigned int tx;
	dma_addr_t frame_dma;
	unsigned int len;
	struct descriptor frame __attribute__ ((__aligned__(16)));
};
/* Elements of the dump_statistics block. This block must be lword aligned. */
struct speedo_stats {
	u32 tx_good_frames;
	u32 tx_coll16_errs;
	u32 tx_late_colls;
	u32 tx_underruns;
	u32 tx_lost_carrier;
	u32 tx_deferred;
	u32 tx_one_colls;
	u32 tx_multi_colls;
	u32 tx_total_colls;
	u32 rx_good_frames;
	u32 rx_crc_errs;
	u32 rx_align_errs;
	u32 rx_resource_errs;
	u32 rx_overrun_errs;
	u32 rx_colls_errs;
	u32 rx_runt_errs;
	u32 done_marker;
};

enum Rx_ring_state_bits {
	RrNoMem=1, RrPostponed=2, RrNoResources=4, RrOOMReported=8,
};
/* Do not change the position (alignment) of the first few elements!
   The later elements are grouped for cache locality.

   Unfortunately, all the positions have been shifted since then.
   A new re-alignment is required.  2000/03/06  SAW */
struct speedo_private {
	struct TxFD	*tx_ring;		/* Commands (usually CmdTxPacket). */
	struct RxFD	*rx_ringp[RX_RING_SIZE]; /* Rx descriptor, used as ring. */
	/* The addresses of a Tx/Rx-in-place packets/buffers. */
	struct sk_buff	*tx_skbuff[TX_RING_SIZE];
	struct sk_buff	*rx_skbuff[RX_RING_SIZE];
	/* Mapped addresses of the rings. */
	dma_addr_t	tx_ring_dma;
#define TX_RING_ELEM_DMA(sp, n) ((sp)->tx_ring_dma + (n)*sizeof(struct TxFD))
	dma_addr_t	rx_ring_dma[RX_RING_SIZE];
	struct descriptor *last_cmd;		/* Last command sent. */
	unsigned int	cur_tx, dirty_tx;	/* The ring entries to be free()ed. */
	spinlock_t	lock;			/* Group with Tx control cache line. */
	u32		tx_threshold;		/* The value for txdesc.count. */
	struct RxFD	*last_rxf;		/* Last filled RX buffer. */
	dma_addr_t	last_rxf_dma;
	unsigned int	cur_rx, dirty_rx;	/* The next free ring entry */
	long		last_rx_time;		/* Last Rx, in jiffies, to handle Rx hang. */
	struct net_device_stats stats;
	struct speedo_stats *lstats;
	dma_addr_t	lstats_dma;
	int		chip_id;
	struct pci_dev	*pdev;
	struct timer_list timer;		/* Media selection timer. */
	struct speedo_mc_block *mc_setup_head;	/* Multicast setup frame list head. */
	struct speedo_mc_block *mc_setup_tail;	/* Multicast setup frame list tail. */
	long		in_interrupt;		/* Word-aligned dev->interrupt */
	unsigned char	acpi_pwr;
	signed char	rx_mode;		/* Current PROMISC/ALLMULTI setting. */
	unsigned int	tx_full:1;		/* The Tx queue is full. */
	unsigned int	flow_ctrl:1;		/* Use 802.3x flow control. */
	unsigned int	rx_bug:1;		/* Work around receiver hang errata. */
	unsigned char	default_port:8;		/* Last dev->if_port value. */
	unsigned char	rx_ring_state;		/* RX ring status flags. */
	unsigned short	phy[2];			/* PHY media interfaces available. */
	unsigned short	partner;		/* Link partner caps. */
	struct mii_if_info mii_if;		/* MII API hooks, info */
	u32		msg_enable;		/* debug message level */
#ifdef CONFIG_PM
	u32		pm_state[16];
#endif
};
/* The parameters for a CmdConfigure operation.
   There are so many options that it would be difficult to document each bit.
   We mostly use the default or recommended settings. */
static const char i82557_config_cmd[CONFIG_DATA_SIZE] = {
	22, 0x08, 0, 0,  0, 0, 0x32, 0x03,  1, /* 1=Use MII  0=Use AUI */
	0, 0x2E, 0,  0x60, 0,
	0xf2, 0x48,   0, 0x40, 0xf2, 0x80,	/* 0x40=Force full-duplex */
	0x3f, 0x05, };
static const char i82558_config_cmd[CONFIG_DATA_SIZE] = {
	22, 0x08, 0, 1,  0, 0, 0x22, 0x03,  1, /* 1=Use MII  0=Use AUI */
	0, 0x2E, 0,  0x60, 0x08, 0x88,
	0x68, 0, 0x40, 0xf2, 0x84,		/* Disable FC */
	0x31, 0x05, };

/* PHY media interface chips. */
static const char *phys[] = {
	"None", "i82553-A/B", "i82553-C", "i82503",
	"DP83840", "80c240", "80c24", "i82555",
	"unknown-8", "unknown-9", "DP83840A", "unknown-11",
	"unknown-12", "unknown-13", "unknown-14", "unknown-15", };
enum phy_chips { NonSuchPhy=0, I82553AB, I82553C, I82503, DP83840, S80C240,
		 S80C24, I82555, DP83840A=10, };
static const char is_mii[] = { 0, 1, 1, 0, 1, 1, 0, 1 };
#define EE_READ_CMD		(6)
static int eepro100_init_one(struct pci_dev *pdev,
		const struct pci_device_id *ent);

static int do_eeprom_cmd(long ioaddr, int cmd, int cmd_len);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int speedo_open(struct net_device *dev);
static void speedo_resume(struct net_device *dev);
static void speedo_timer(unsigned long data);
static void speedo_init_rx_ring(struct net_device *dev);
static void speedo_tx_timeout(struct net_device *dev);
static int speedo_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void speedo_refill_rx_buffers(struct net_device *dev, int force);
static int speedo_rx(struct net_device *dev);
static void speedo_tx_buffer_gc(struct net_device *dev);
static irqreturn_t speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static int speedo_close(struct net_device *dev);
static struct net_device_stats *speedo_get_stats(struct net_device *dev);
static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void set_rx_mode(struct net_device *dev);
static void speedo_show_state(struct net_device *dev);

#ifdef honor_default_port
/* Optional driver feature to allow forcing the transceiver setting.
   Not recommended. */
static int mii_ctrl[8] = { 0x3300, 0x3100, 0x0000, 0x0100,
			   0x2000, 0x2100, 0x0400, 0x3100};
#endif
/* How to wait for the command unit to accept a command.
   Typically this takes 0 ticks. */
static inline unsigned char wait_for_cmd_done(struct net_device *dev)
{
	int wait = 1000;
	long cmd_ioaddr = dev->base_addr + SCBCmd;
	unsigned char r;

	do {
		udelay(1);
		r = inb(cmd_ioaddr);
	} while(r && --wait >= 0);

	if (wait < 0)
		printk(KERN_ALERT "%s: wait_for_cmd_done timeout!\n", dev->name);
	return r;
}
static int __devinit eepro100_init_one (struct pci_dev *pdev,
		const struct pci_device_id *ent)
{
	unsigned long ioaddr;
	int irq;
	int acpi_idle_state = 0, pm;
	static int cards_found /* = 0 */;

#ifndef MODULE
	/* when built-in, we only print version if device is found */
	static int did_version;
	if (did_version++ == 0)
		printk(version);
#endif

	/* save power state before pci_enable_device overwrites it */
	pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm) {
		u16 pwr_command;
		pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pwr_command);
		acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
	}

	if (pci_enable_device(pdev))
		goto err_out_free_mmio_region;

	pci_set_master(pdev);

	if (!request_region(pci_resource_start(pdev, 1),
			pci_resource_len(pdev, 1), "eepro100")) {
		printk (KERN_ERR "eepro100: cannot reserve I/O ports\n");
		goto err_out_none;
	}
	if (!request_mem_region(pci_resource_start(pdev, 0),
			pci_resource_len(pdev, 0), "eepro100")) {
		printk (KERN_ERR "eepro100: cannot reserve MMIO region\n");
		goto err_out_free_pio_region;
	}

	irq = pdev->irq;
#ifdef USE_IO
	ioaddr = pci_resource_start(pdev, 1);
	if (DEBUG & NETIF_MSG_PROBE)
		printk("Found Intel i82557 PCI Speedo at I/O %#lx, IRQ %d.\n",
			   ioaddr, irq);
#else
	ioaddr = (unsigned long)ioremap(pci_resource_start(pdev, 0),
					pci_resource_len(pdev, 0));
	if (!ioaddr) {
		printk (KERN_ERR "eepro100: cannot remap MMIO region %lx @ %lx\n",
				pci_resource_len(pdev, 0), pci_resource_start(pdev, 0));
		goto err_out_free_mmio_region;
	}
	if (DEBUG & NETIF_MSG_PROBE)
		printk("Found Intel i82557 PCI Speedo, MMIO at %#lx, IRQ %d.\n",
			   pci_resource_start(pdev, 0), irq);
#endif

	if (speedo_found1(pdev, ioaddr, cards_found, acpi_idle_state) == 0)
		cards_found++;
	else
		goto err_out_iounmap;

	return 0;

err_out_iounmap: ;
#ifndef USE_IO
	iounmap ((void *)ioaddr);
#endif
err_out_free_mmio_region:
	release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
err_out_free_pio_region:
	release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
err_out_none:
	return -ENODEV;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void poll_speedo (struct net_device *dev)
{
	/* disable_irq is not very nice, but with the funny lockless design
	   we have no other choice. */
	disable_irq(dev->irq);
	speedo_interrupt (dev->irq, dev, NULL);
	enable_irq(dev->irq);
}
#endif
static int __devinit speedo_found1(struct pci_dev *pdev,
		long ioaddr, int card_idx, int acpi_idle_state)
{
	struct net_device *dev;
	struct speedo_private *sp;
	const char *product;
	int i, option;
	u16 eeprom[0x100];
	int size;
	void *tx_ring_space;
	dma_addr_t tx_ring_dma;

	size = TX_RING_SIZE * sizeof(struct TxFD) + sizeof(struct speedo_stats);
	tx_ring_space = pci_alloc_consistent(pdev, size, &tx_ring_dma);
	if (tx_ring_space == NULL)
		return -1;

	dev = alloc_etherdev(sizeof(struct speedo_private));
	if (dev == NULL) {
		printk(KERN_ERR "eepro100: Could not allocate ethernet device.\n");
		pci_free_consistent(pdev, size, tx_ring_space, tx_ring_dma);
		return -1;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (dev->mem_start > 0)
		option = dev->mem_start;
	else if (card_idx >= 0  &&  options[card_idx] >= 0)
		option = options[card_idx];
	else
		option = 0;

	rtnl_lock();
	if (dev_alloc_name(dev, dev->name) < 0)
		goto err_free_unlock;

	/* Read the station address EEPROM before doing the reset.
	   Nominally this should even be done before accepting the device, but
	   then we wouldn't have a device name with which to report the error.
	   The size test is for 6 bit vs. 8 bit address serial EEPROMs.
	*/
	{
		unsigned long iobase;
		int read_cmd, ee_size;
		u16 sum;
		int j;

		/* Use IO only to avoid postponed writes and satisfy EEPROM timing
		   requirements. */
		iobase = pci_resource_start(pdev, 1);
		if ((do_eeprom_cmd(iobase, EE_READ_CMD << 24, 27) & 0xffe0000)
			== 0xffe0000) {
			ee_size = 0x100;
			read_cmd = EE_READ_CMD << 24;
		} else {
			ee_size = 0x40;
			read_cmd = EE_READ_CMD << 22;
		}

		for (j = 0, i = 0, sum = 0; i < ee_size; i++) {
			u16 value = do_eeprom_cmd(iobase, read_cmd | (i << 16), 27);
			eeprom[i] = value;
			sum += value;
			if (i < 3) {
				dev->dev_addr[j++] = value;
				dev->dev_addr[j++] = value >> 8;
			}
		}
		if (sum != 0xBABA)
			printk(KERN_WARNING "%s: Invalid EEPROM checksum %#4.4x, "
				   "check settings before activating this device!\n",
				   dev->name, sum);
		/* Don't  unregister_netdev(dev);  as the EEPro may actually be
		   usable, especially if the MAC address is set later.
		   On the other hand, it may be unusable if MDI data is corrupted. */
	}

	/* Reset the chip: stop Tx and Rx processes and clear counters.
	   This takes less than 10usec and will easily finish before the next
	   action. */
	outl(PortReset, ioaddr + SCBPort);
	inl(ioaddr + SCBPort);
	udelay(10);
	if (eeprom[3] & 0x0100)
		product = "OEM i82557/i82558 10/100 Ethernet";
	else
		product = pci_name(pdev);

	printk(KERN_INFO "%s: %s, ", dev->name, product);

	for (i = 0; i < 5; i++)
		printk("%2.2X:", dev->dev_addr[i]);
	printk("%2.2X, ", dev->dev_addr[i]);
#ifdef USE_IO
	printk("I/O at %#3lx, ", ioaddr);
#endif
	printk("IRQ %d.\n", pdev->irq);

	/* we must initialize base_addr early, for mdio_{read,write} */
	dev->base_addr = ioaddr;
#if 1 || defined(kernel_bloat)
	/* OK, this is pure kernel bloat.  I don't like it when other drivers
	   waste non-pageable kernel space to emit similar messages, but I need
	   them for bug reports. */
	{
		const char *connectors[] = {" RJ45", " BNC", " AUI", " MII"};
		/* The self-test results must be paragraph aligned. */
		volatile s32 *self_test_results;
		int boguscnt = 16000;	/* Timeout for self-test. */
		if ((eeprom[3] & 0x03) != 0x03)
			printk(KERN_INFO "  Receiver lock-up bug exists -- enabling"
				   " work-around.\n");
		printk(KERN_INFO "  Board assembly %4.4x%2.2x-%3.3d, Physical"
			   " connectors present:",
			   eeprom[8], eeprom[9]>>8, eeprom[9] & 0xff);
		for (i = 0; i < 4; i++)
			if (eeprom[5] & (1<<i))
				printk(connectors[i]);
		printk("\n"KERN_INFO"  Primary interface chip %s PHY #%d.\n",
			   phys[(eeprom[6]>>8)&15], eeprom[6] & 0x1f);
		if (eeprom[7] & 0x0700)
			printk(KERN_INFO "    Secondary interface chip %s.\n",
				   phys[(eeprom[7]>>8)&7]);
		if (((eeprom[6]>>8) & 0x3f) == DP83840
			||  ((eeprom[6]>>8) & 0x3f) == DP83840A) {
			int mdi_reg23 = mdio_read(dev, eeprom[6] & 0x1f, 23) | 0x0422;
			if (congenb)
				mdi_reg23 |= 0x0100;
			printk(KERN_INFO"  DP83840 specific setup, setting register 23 to %4.4x.\n",
				   mdi_reg23);
			mdio_write(dev, eeprom[6] & 0x1f, 23, mdi_reg23);
		}
		if ((option >= 0) && (option & 0x70)) {
			printk(KERN_INFO "  Forcing %dMbs %s-duplex operation.\n",
				   (option & 0x20 ? 100 : 10),
				   (option & 0x10 ? "full" : "half"));
			mdio_write(dev, eeprom[6] & 0x1f, MII_BMCR,
					   ((option & 0x20) ? 0x2000 : 0) |	/* 100mbps? */
					   ((option & 0x10) ? 0x0100 : 0));	/* Full duplex? */
		}

		/* Perform a system self-test. */
		self_test_results = (s32*) ((((long) tx_ring_space) + 15) & ~0xf);
		self_test_results[0] = 0;
		self_test_results[1] = -1;
		outl(tx_ring_dma | PortSelfTest, ioaddr + SCBPort);
		do {
			udelay(10);
		} while (self_test_results[1] == -1  &&  --boguscnt >= 0);

		if (boguscnt < 0) {		/* Test optimized out. */
			printk(KERN_ERR "Self test failed, status %8.8x:\n"
				   KERN_ERR " Failure to initialize the i82557.\n"
				   KERN_ERR " Verify that the card is a bus-master"
				   " capable slot.\n",
				   self_test_results[1]);
		} else
			printk(KERN_INFO "  General self-test: %s.\n"
				   KERN_INFO "  Serial sub-system self-test: %s.\n"
				   KERN_INFO "  Internal registers self-test: %s.\n"
				   KERN_INFO "  ROM checksum self-test: %s (%#8.8x).\n",
				   self_test_results[1] & 0x1000 ? "failed" : "passed",
				   self_test_results[1] & 0x0020 ? "failed" : "passed",
				   self_test_results[1] & 0x0008 ? "failed" : "passed",
				   self_test_results[1] & 0x0004 ? "failed" : "passed",
				   self_test_results[0]);
	}
#endif  /* kernel_bloat */
	outl(PortReset, ioaddr + SCBPort);
	inl(ioaddr + SCBPort);
	udelay(10);

	/* Return the chip to its original power state. */
	pci_set_power_state(pdev, acpi_idle_state);

	pci_set_drvdata (pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	dev->irq = pdev->irq;

	sp = netdev_priv(dev);
	sp->pdev = pdev;
	sp->msg_enable = DEBUG;
	sp->acpi_pwr = acpi_idle_state;
	sp->tx_ring = tx_ring_space;
	sp->tx_ring_dma = tx_ring_dma;
	sp->lstats = (struct speedo_stats *)(sp->tx_ring + TX_RING_SIZE);
	sp->lstats_dma = TX_RING_ELEM_DMA(sp, TX_RING_SIZE);
	init_timer(&sp->timer); /* used in ioctl() */
	spin_lock_init(&sp->lock);

	sp->mii_if.full_duplex = option >= 0 && (option & 0x10) ? 1 : 0;
	if (card_idx >= 0) {
		if (full_duplex[card_idx] >= 0)
			sp->mii_if.full_duplex = full_duplex[card_idx];
	}
	sp->default_port = option >= 0 ? (option & 0x0f) : 0;

	sp->phy[0] = eeprom[6];
	sp->phy[1] = eeprom[7];

	sp->mii_if.phy_id = eeprom[6] & 0x1f;
	sp->mii_if.phy_id_mask = 0x1f;
	sp->mii_if.reg_num_mask = 0x1f;
	sp->mii_if.dev = dev;
	sp->mii_if.mdio_read = mdio_read;
	sp->mii_if.mdio_write = mdio_write;

	sp->rx_bug = (eeprom[3] & 0x03) == 3 ? 0 : 1;
	if (((pdev->device > 0x1030 && (pdev->device < 0x103F)))
	    || (pdev->device == 0x2449) || (pdev->device == 0x2459)
	    || (pdev->device == 0x245D)) {
		sp->chip_id = 1;
	}

	if (sp->rx_bug)
		printk(KERN_INFO "  Receiver lock-up workaround activated.\n");

	/* The Speedo-specific entries in the device structure. */
	dev->open = &speedo_open;
	dev->hard_start_xmit = &speedo_start_xmit;
	netif_set_tx_timeout(dev, &speedo_tx_timeout, TX_TIMEOUT);
	dev->stop = &speedo_close;
	dev->get_stats = &speedo_get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &speedo_ioctl;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = &poll_speedo;
#endif

	if (register_netdevice(dev))
		goto err_free_unlock;
	rtnl_unlock();

	return 0;

err_free_unlock:
	rtnl_unlock();
	free_netdev(dev);
	return -1;
}
static void do_slow_command(struct net_device *dev, int cmd)
{
	long cmd_ioaddr = dev->base_addr + SCBCmd;
	int wait = 0;
	do
		if (inb(cmd_ioaddr) == 0) break;
	while(++wait <= 200);
	if (wait > 100)
		printk(KERN_ERR "Command %4.4x never accepted (%d polls)!\n",
		       inb(cmd_ioaddr), wait);

	outb(cmd, cmd_ioaddr);

	for (wait = 0; wait <= 100; wait++)
		if (inb(cmd_ioaddr) == 0) return;
	for (; wait <= 20000; wait++)
		if (inb(cmd_ioaddr) == 0) return;
		else udelay(1);
	printk(KERN_ERR "Command %4.4x was not accepted after %d polls!"
	       "  Current status %8.8x.\n",
	       cmd, wait, inl(dev->base_addr + SCBStatus));
}
/* Serial EEPROM section.
   A "bit" grungy, but we work our way through bit-by-bit :->. */
/*  EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK	0x01	/* EEPROM shift clock. */
#define EE_CS		0x02	/* EEPROM chip select. */
#define EE_DATA_WRITE	0x04	/* EEPROM chip data in. */
#define EE_DATA_READ	0x08	/* EEPROM chip data out. */
#define EE_ENB		(0x4800 | EE_CS)
#define EE_WRITE_0	0x4802
#define EE_WRITE_1	0x4806
#define EE_OFFSET	SCBeeprom

/* The fixes for the code were kindly provided by Dragan Stancevic
   <visitor@valinux.com> to strictly follow Intel specifications of EEPROM
   access timing.
   The publicly available sheet 64486302 (sec. 3.1) specifies 1us access
   interval for serial EEPROM.  However, it looks like there is an
   additional requirement dictating larger udelay's in the code below.
   2000/05/24  SAW */
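/* Illustrative note on the framing used by the callers above: a read is
   issued as do_eeprom_cmd(ioaddr, read_cmd | (addr << 16), 27), where
   read_cmd places the EE_READ_CMD opcode (6 = binary 110) above a 16-bit
   data field and either a 6-bit (<< 22) or 8-bit (<< 24) address field.
   The 27 makes the loop below clock out bits 27..0: leading zeros, the
   opcode and address, then 16 clocks during which the data bits shift in. */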
static int __devinit do_eeprom_cmd(long ioaddr, int cmd, int cmd_len)
{
	unsigned retval = 0;
	long ee_addr = ioaddr + SCBeeprom;

	io_outw(EE_ENB, ee_addr); udelay(2);
	io_outw(EE_ENB | EE_SHIFT_CLK, ee_addr); udelay(2);

	/* Shift the command bits out. */
	do {
		short dataval = (cmd & (1 << cmd_len)) ? EE_WRITE_1 : EE_WRITE_0;
		io_outw(dataval, ee_addr); udelay(2);
		io_outw(dataval | EE_SHIFT_CLK, ee_addr); udelay(2);
		retval = (retval << 1) | ((io_inw(ee_addr) & EE_DATA_READ) ? 1 : 0);
	} while (--cmd_len >= 0);
	io_outw(EE_ENB, ee_addr); udelay(2);

	/* Terminate the EEPROM access. */
	io_outw(EE_ENB & ~EE_CS, ee_addr);
	return retval;
}
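/* Illustrative note for mdio_read()/mdio_write() below: the SCBCtrlMDI
   register packs the opcode into bits 27:26 (0x08000000 = read,
   0x04000000 = write), the PHY address into bits 25:21, the register
   number into bits 20:16, and the 16-bit data into the low half; the
   chip sets bit 28 (0x10000000) when the transaction is ready. */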
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	long ioaddr = dev->base_addr;
	int val, boguscnt = 64*10;	/* <64 usec. to complete, typ 27 ticks */
	outl(0x08000000 | (location<<16) | (phy_id<<21), ioaddr + SCBCtrlMDI);
	do {
		val = inl(ioaddr + SCBCtrlMDI);
		if (--boguscnt < 0) {
			printk(KERN_ERR " mdio_read() timed out with val = %8.8x.\n", val);
			break;
		}
	} while (! (val & 0x10000000));
	return val & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	long ioaddr = dev->base_addr;
	int val, boguscnt = 64*10;	/* <64 usec. to complete, typ 27 ticks */
	outl(0x04000000 | (location<<16) | (phy_id<<21) | value,
	     ioaddr + SCBCtrlMDI);
	do {
		val = inl(ioaddr + SCBCtrlMDI);
		if (--boguscnt < 0) {
			printk(KERN_ERR" mdio_write() timed out with val = %8.8x.\n", val);
			break;
		}
	} while (! (val & 0x10000000));
}
static int
speedo_open(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	int retval;

	if (netif_msg_ifup(sp))
		printk(KERN_DEBUG "%s: speedo_open() irq %d.\n", dev->name, dev->irq);

	pci_set_power_state(sp->pdev, 0);

	/* Set up the Tx queue early.. */
	sp->cur_tx = 0;
	sp->dirty_tx = 0;
	sp->last_cmd = NULL;
	sp->tx_full = 0;
	sp->in_interrupt = 0;

	/* .. we can safely take handler calls during init. */
	retval = request_irq(dev->irq, &speedo_interrupt, SA_SHIRQ, dev->name, dev);
	if (retval) {
		return retval;
	}

	dev->if_port = sp->default_port;

#ifdef oh_no_you_dont_unless_you_honour_the_options_passed_in_to_us
	/* Retrigger negotiation to reset previous errors. */
	if ((sp->phy[0] & 0x8000) == 0) {
		int phy_addr = sp->phy[0] & 0x1f ;
		/* Use 0x3300 for restarting NWay, other values to force xcvr:
		   0x0000 10-HD
		   0x0100 10-FD
		   0x2000 100-HD
		   0x2100 100-FD
		 */
#ifdef honor_default_port
		mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
#else
		mdio_write(dev, phy_addr, MII_BMCR, 0x3300);
#endif
	}
#endif

	speedo_init_rx_ring(dev);

	/* Fire up the hardware. */
	outw(SCBMaskAll, ioaddr + SCBCmd);
	speedo_resume(dev);

	netdevice_start(dev);
	netif_start_queue(dev);

	/* Setup the chip and configure the multicast list. */
	sp->mc_setup_head = NULL;
	sp->mc_setup_tail = NULL;
	sp->flow_ctrl = sp->partner = 0;
	sp->rx_mode = -1;	/* Invalid -> always reset the mode. */
	set_rx_mode(dev);
	if ((sp->phy[0] & 0x8000) == 0)
		sp->mii_if.advertising = mdio_read(dev, sp->phy[0] & 0x1f, MII_ADVERTISE);

	mii_check_link(&sp->mii_if);

	if (netif_msg_ifup(sp)) {
		printk(KERN_DEBUG "%s: Done speedo_open(), status %8.8x.\n",
			   dev->name, inw(ioaddr + SCBStatus));
	}

	/* Set the timer.  The timer serves a dual purpose:
	   1) to monitor the media interface (e.g. link beat) and perhaps switch
	   to an alternate media type
	   2) to monitor Rx activity, and restart the Rx process if the receiver
	   hangs. */
	sp->timer.expires = RUN_AT((24*HZ)/10);	/* 2.4 sec. */
	sp->timer.data = (unsigned long)dev;
	sp->timer.function = &speedo_timer;	/* timer handler */
	add_timer(&sp->timer);

	/* No need to wait for the command unit to accept here. */
	if ((sp->phy[0] & 0x8000) == 0)
		mdio_read(dev, sp->phy[0] & 0x1f, MII_BMCR);

	return 0;
}
/* Start the chip hardware after a full reset. */
static void speedo_resume(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	long ioaddr = dev->base_addr;

	/* Start with a Tx threshold of 256 (0x..20.... 8 byte units). */
	sp->tx_threshold = 0x01208000;

	/* Set the segment registers to '0'. */
	if (wait_for_cmd_done(dev) != 0) {
		outl(PortPartialReset, ioaddr + SCBPort);
		udelay(10);
	}

	outl(0, ioaddr + SCBPointer);
	inl(ioaddr + SCBPointer);	/* Flush to PCI. */
	udelay(10);			/* Bogus, but it avoids the bug. */

	/* Note: these next two operations can take a while. */
	do_slow_command(dev, RxAddrLoad);
	do_slow_command(dev, CUCmdBase);

	/* Load the statistics block and rx ring addresses. */
	outl(sp->lstats_dma, ioaddr + SCBPointer);
	inl(ioaddr + SCBPointer);	/* Flush to PCI */

	outb(CUStatsAddr, ioaddr + SCBCmd);
	sp->lstats->done_marker = 0;
	wait_for_cmd_done(dev);

	if (sp->rx_ringp[sp->cur_rx % RX_RING_SIZE] == NULL) {
		if (netif_msg_rx_err(sp))
			printk(KERN_DEBUG "%s: NULL cur_rx in speedo_resume().\n",
					dev->name);
	} else {
		outl(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
			 ioaddr + SCBPointer);
		inl(ioaddr + SCBPointer);	/* Flush to PCI */
	}

	/* Note: RxStart should complete instantly. */
	do_slow_command(dev, RxStart);
	do_slow_command(dev, CUDumpStats);

	/* Fill the first command with our physical address. */
	{
		struct descriptor *ias_cmd;

		ias_cmd =
			(struct descriptor *)&sp->tx_ring[sp->cur_tx++ % TX_RING_SIZE];
		/* Avoid a bug(?!) here by marking the command already completed. */
		ias_cmd->cmd_status = cpu_to_le32((CmdSuspend | CmdIASetup) | 0xa000);
		ias_cmd->link =
			cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
		memcpy(ias_cmd->params, dev->dev_addr, 6);
		if (sp->last_cmd)
			clear_suspend(sp->last_cmd);
		sp->last_cmd = ias_cmd;
	}

	/* Start the chip's Tx process and unmask interrupts. */
	outl(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
		 ioaddr + SCBPointer);
	/* We are not ACK-ing FCP and ER in the interrupt handler yet so they should
	   remain masked --Dragan */
	outw(CUStart | SCBMaskEarlyRx | SCBMaskFlowCtl, ioaddr + SCBCmd);
}
/*
 * Sometimes the receiver stops making progress.  This routine knows how to
 * get it going again, without losing packets or being otherwise nasty like
 * a chip reset would be.  Previously the driver had a whole sequence
 * of if RxSuspended, if it's no buffers do one thing, if it's no resources,
 * do another, etc.  But those things don't really matter.  Separate logic
 * in the ISR provides for allocating buffers--the other half of operation
 * is just making sure the receiver is active.  speedo_rx_soft_reset does that.
 * The problem with the old, more involved algorithm showed up under
 * ping floods on the order of 60K packets/second on a 100Mbps fdx network.
 */
static void
speedo_rx_soft_reset(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	struct RxFD *rfd;
	long ioaddr;

	ioaddr = dev->base_addr;
	if (wait_for_cmd_done(dev) != 0) {
		printk("%s: previous command stalled\n", dev->name);
		return;
	}
	/*
	 * Put the hardware into a known state.
	 */
	outb(RxAbort, ioaddr + SCBCmd);

	rfd = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];

	rfd->rx_buf_addr = 0xffffffff;

	if (wait_for_cmd_done(dev) != 0) {
		printk("%s: RxAbort command stalled\n", dev->name);
		return;
	}
	outl(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
		ioaddr + SCBPointer);
	outb(RxStart, ioaddr + SCBCmd);
}
/* Media monitoring and control. */
static void speedo_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct speedo_private *sp = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	int phy_num = sp->phy[0] & 0x1f;

	/* We have MII and lost link beat. */
	if ((sp->phy[0] & 0x8000) == 0) {
		int partner = mdio_read(dev, phy_num, MII_LPA);
		if (partner != sp->partner) {
			int flow_ctrl = sp->mii_if.advertising & partner & 0x0400 ? 1 : 0;
			if (netif_msg_link(sp)) {
				printk(KERN_DEBUG "%s: Link status change.\n", dev->name);
				printk(KERN_DEBUG "%s: Old partner %x, new %x, adv %x.\n",
					   dev->name, sp->partner, partner, sp->mii_if.advertising);
			}
			sp->partner = partner;
			if (flow_ctrl != sp->flow_ctrl) {
				sp->flow_ctrl = flow_ctrl;
				sp->rx_mode = -1;	/* Trigger a reload. */
			}
		}
	}
	mii_check_link(&sp->mii_if);
	if (netif_msg_timer(sp)) {
		printk(KERN_DEBUG "%s: Media control tick, status %4.4x.\n",
			   dev->name, inw(ioaddr + SCBStatus));
	}
	if (sp->rx_mode < 0  ||
		(sp->rx_bug  && jiffies - sp->last_rx_time > 2*HZ)) {
		/* We haven't received a packet in a Long Time.  We might have been
		   bitten by the receiver hang bug.  This can be cleared by sending
		   a set multicast list command. */
		if (netif_msg_timer(sp))
			printk(KERN_DEBUG "%s: Sending a multicast list set command"
				   " from a timer routine,"
				   " m=%d, j=%ld, l=%ld.\n",
				   dev->name, sp->rx_mode, jiffies, sp->last_rx_time);
		set_rx_mode(dev);
	}
	/* We must continue to monitor the media. */
	sp->timer.expires = RUN_AT(2*HZ);	/* 2.0 sec. */
	add_timer(&sp->timer);
}
static void speedo_show_state(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	int i;

	if (netif_msg_pktdata(sp)) {
		printk(KERN_DEBUG "%s: Tx ring dump,  Tx queue %u / %u:\n",
		    dev->name, sp->cur_tx, sp->dirty_tx);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_DEBUG "%s:  %c%c%2d %8.8x.\n", dev->name,
			    i == sp->dirty_tx % TX_RING_SIZE ? '*' : ' ',
			    i == sp->cur_tx % TX_RING_SIZE ? '=' : ' ',
			    i, sp->tx_ring[i].status);

		printk(KERN_DEBUG "%s: Printing Rx ring"
		    " (next to receive into %u, dirty index %u).\n",
		    dev->name, sp->cur_rx, sp->dirty_rx);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(KERN_DEBUG "%s: %c%c%c%2d %8.8x.\n", dev->name,
			    sp->rx_ringp[i] == sp->last_rxf ? 'l' : ' ',
			    i == sp->dirty_rx % RX_RING_SIZE ? '*' : ' ',
			    i == sp->cur_rx % RX_RING_SIZE ? '=' : ' ',
			    i, (sp->rx_ringp[i] != NULL) ?
			    (unsigned)sp->rx_ringp[i]->status : 0);
	}

#if 0
	{
		long ioaddr = dev->base_addr;
		int phy_num = sp->phy[0] & 0x1f;
		for (i = 0; i < 16; i++) {
			/* FIXME: what does it mean?  --SAW */
			if (i == 6) i = 21;
			printk(KERN_DEBUG "%s:  PHY index %d register %d is %4.4x.\n",
				   dev->name, phy_num, i, mdio_read(dev, phy_num, i));
		}
	}
#endif
}
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void
speedo_init_rx_ring(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	struct RxFD *rxf, *last_rxf = NULL;
	dma_addr_t last_rxf_dma = 0 /* to shut up the compiler */;
	int i;

	sp->cur_rx = 0;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
		/* XXX: do we really want to call this before the NULL check? --hch */
		rx_align(skb);		/* Align IP on 16 byte boundary */
		sp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;		/* OK.  Just initially short of Rx bufs. */
		skb->dev = dev;		/* Mark as being used by this device. */
		rxf = (struct RxFD *)skb->tail;
		sp->rx_ringp[i] = rxf;
		sp->rx_ring_dma[i] =
			pci_map_single(sp->pdev, rxf,
					PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_BIDIRECTIONAL);
		skb_reserve(skb, sizeof(struct RxFD));
		if (last_rxf) {
			last_rxf->link = cpu_to_le32(sp->rx_ring_dma[i]);
			pci_dma_sync_single_for_device(sp->pdev, last_rxf_dma,
					sizeof(struct RxFD), PCI_DMA_TODEVICE);
		}
		last_rxf = rxf;
		last_rxf_dma = sp->rx_ring_dma[i];
		rxf->status = cpu_to_le32(0x00000001);	/* '1' is flag value only. */
		rxf->link = 0;				/* None yet. */
		/* This field unused by i82557. */
		rxf->rx_buf_addr = 0xffffffff;
		rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
		pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[i],
				sizeof(struct RxFD), PCI_DMA_TODEVICE);
	}
	sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
	/* Mark the last entry as end-of-list. */
	last_rxf->status = cpu_to_le32(0xC0000002);	/* '2' is flag value only. */
	pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[RX_RING_SIZE-1],
			sizeof(struct RxFD), PCI_DMA_TODEVICE);
	sp->last_rxf = last_rxf;
	sp->last_rxf_dma = last_rxf_dma;
}
static void speedo_purge_tx(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	int entry;

	while ((int)(sp->cur_tx - sp->dirty_tx) > 0) {
		entry = sp->dirty_tx % TX_RING_SIZE;
		if (sp->tx_skbuff[entry]) {
			sp->stats.tx_errors++;
			pci_unmap_single(sp->pdev,
					le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
					sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(sp->tx_skbuff[entry]);
			sp->tx_skbuff[entry] = NULL;
		}
		sp->dirty_tx++;
	}
	while (sp->mc_setup_head != NULL) {
		struct speedo_mc_block *t;
		if (netif_msg_tx_err(sp))
			printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
		pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
				sp->mc_setup_head->len, PCI_DMA_TODEVICE);
		t = sp->mc_setup_head->next;
		kfree(sp->mc_setup_head);
		sp->mc_setup_head = t;
	}
	sp->mc_setup_tail = NULL;
	sp->tx_full = 0;
	netif_wake_queue(dev);
}
static void reset_mii(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);

	/* Reset the MII transceiver, suggested by Fred Young @ scalable.com. */
	if ((sp->phy[0] & 0x8000) == 0) {
		int phy_addr = sp->phy[0] & 0x1f;
		int advertising = mdio_read(dev, phy_addr, MII_ADVERTISE);
		int mii_bmcr = mdio_read(dev, phy_addr, MII_BMCR);
		mdio_write(dev, phy_addr, MII_BMCR, 0x0400);
		mdio_write(dev, phy_addr, MII_BMSR, 0x0000);
		mdio_write(dev, phy_addr, MII_ADVERTISE, 0x0000);
		mdio_write(dev, phy_addr, MII_BMCR, 0x8000);
#ifdef honor_default_port
		mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
#else
		mdio_read(dev, phy_addr, MII_BMCR);
		mdio_write(dev, phy_addr, MII_BMCR, mii_bmcr);
		mdio_write(dev, phy_addr, MII_ADVERTISE, advertising);
#endif
	}
}
static void speedo_tx_timeout(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	int status = inw(ioaddr + SCBStatus);
	unsigned long flags;

	if (netif_msg_tx_err(sp)) {
		printk(KERN_WARNING "%s: Transmit timed out: status %4.4x "
		   " %4.4x at %d/%d command %8.8x.\n",
		   dev->name, status, inw(ioaddr + SCBCmd),
		   sp->dirty_tx, sp->cur_tx,
		   sp->tx_ring[sp->dirty_tx % TX_RING_SIZE].status);
	}
	speedo_show_state(dev);
#if 0
	if ((status & 0x00C0) != 0x0080
		&&  (status & 0x003C) == 0x0010) {
		/* Only the command unit has stopped. */
		printk(KERN_WARNING "%s: Trying to restart the transmitter...\n",
			   dev->name);
		outl(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
			 ioaddr + SCBPointer);
		outw(CUStart, ioaddr + SCBCmd);
		reset_mii(dev);
	} else {
#else
	{
#endif
		del_timer_sync(&sp->timer);
		/* Reset the Tx and Rx units. */
		outl(PortReset, ioaddr + SCBPort);
		/* We may get spurious interrupts here.  But I don't think that they
		   may do much harm.  1999/12/09 SAW */
		udelay(10);
		/* Disable interrupts. */
		outw(SCBMaskAll, ioaddr + SCBCmd);
		synchronize_irq(dev->irq);
		speedo_tx_buffer_gc(dev);
		/* Free as much as possible.
		   It helps to recover from a hang because of out-of-memory.
		   It also simplifies speedo_resume() in case TX ring is full or
		   close-to-be full. */
		speedo_purge_tx(dev);
		speedo_refill_rx_buffers(dev, 1);
		spin_lock_irqsave(&sp->lock, flags);
		speedo_resume(dev);
		sp->rx_mode = -1;
		dev->trans_start = jiffies;
		spin_unlock_irqrestore(&sp->lock, flags);
		set_rx_mode(dev); /* it takes the spinlock itself --SAW */
		/* Reset MII transceiver.  Do it before starting the timer to serialize
		   mdio_xxx operations.  Yes, it's a paranoia :-)  2000/05/09 SAW */
		reset_mii(dev);
		sp->timer.expires = RUN_AT(2*HZ);
		add_timer(&sp->timer);
	}
	return;
}
static int
speedo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	int entry;

	/* Prevent interrupts from changing the Tx ring from underneath us. */
	unsigned long flags;

	spin_lock_irqsave(&sp->lock, flags);

	/* Check if there is enough space. */
	if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
		printk(KERN_ERR "%s: incorrect tbusy state, fixed.\n", dev->name);
		netif_stop_queue(dev);
		sp->tx_full = 1;
		spin_unlock_irqrestore(&sp->lock, flags);
		return 1;
	}

	/* Calculate the Tx descriptor entry. */
	entry = sp->cur_tx++ % TX_RING_SIZE;

	sp->tx_skbuff[entry] = skb;
	sp->tx_ring[entry].status =
		cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
	if (!(entry & ((TX_RING_SIZE>>2)-1)))
		sp->tx_ring[entry].status |= cpu_to_le32(CmdIntr);
	sp->tx_ring[entry].link =
		cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
	sp->tx_ring[entry].tx_desc_addr =
		cpu_to_le32(TX_RING_ELEM_DMA(sp, entry) + TX_DESCR_BUF_OFFSET);
	/* The data region is always in one buffer descriptor. */
	sp->tx_ring[entry].count = cpu_to_le32(sp->tx_threshold);
	sp->tx_ring[entry].tx_buf_addr0 =
		cpu_to_le32(pci_map_single(sp->pdev, skb->data,
					   skb->len, PCI_DMA_TODEVICE));
	sp->tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb->len);

	/* workaround for hardware bug on 10 mbit half duplex */

	if ((sp->partner == 0) && (sp->chip_id == 1)) {
		wait_for_cmd_done(dev);
		outb(0 , ioaddr + SCBCmd);
		udelay(1);
	}

	/* Trigger the command unit resume. */
	wait_for_cmd_done(dev);
	clear_suspend(sp->last_cmd);
	/* We want the time window between clearing suspend flag on the previous
	   command and resuming CU to be as small as possible.
	   Interrupts in between are very undesired.  --SAW */
	outb(CUResume, ioaddr + SCBCmd);
	sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];

	/* Leave room for set_rx_mode(). If there is no more space than reserved
	   for multicast filter mark the ring as full. */
	if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
		netif_stop_queue(dev);
		sp->tx_full = 1;
	}

	spin_unlock_irqrestore(&sp->lock, flags);

	dev->trans_start = jiffies;

	return 0;
}
static void speedo_tx_buffer_gc(struct net_device *dev)
{
	unsigned int dirty_tx;
	struct speedo_private *sp = netdev_priv(dev);

	dirty_tx = sp->dirty_tx;
	while ((int)(sp->cur_tx - dirty_tx) > 0) {
		int entry = dirty_tx % TX_RING_SIZE;
		int status = le32_to_cpu(sp->tx_ring[entry].status);

		if (netif_msg_tx_done(sp))
			printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n",
				   entry, status);
		if ((status & StatusComplete) == 0)
			break;		/* It still hasn't been processed. */
		if (status & TxUnderrun)
			if (sp->tx_threshold < 0x01e08000) {
				if (netif_msg_tx_err(sp))
					printk(KERN_DEBUG "%s: TX underrun, threshold adjusted.\n",
						   dev->name);
				sp->tx_threshold += 0x00040000;
			}
		/* Free the original skb. */
		if (sp->tx_skbuff[entry]) {
			sp->stats.tx_packets++;	/* Count only user packets. */
			sp->stats.tx_bytes += sp->tx_skbuff[entry]->len;
			pci_unmap_single(sp->pdev,
					le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
					sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(sp->tx_skbuff[entry]);
			sp->tx_skbuff[entry] = NULL;
		}
		dirty_tx++;
	}

	if (netif_msg_tx_err(sp) && (int)(sp->cur_tx - dirty_tx) > TX_RING_SIZE) {
		printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d,"
			   " full=%d.\n",
			   dirty_tx, sp->cur_tx, sp->tx_full);
		dirty_tx += TX_RING_SIZE;
	}

	while (sp->mc_setup_head != NULL
		   && (int)(dirty_tx - sp->mc_setup_head->tx - 1) > 0) {
		struct speedo_mc_block *t;
		if (netif_msg_tx_err(sp))
			printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
		pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
				sp->mc_setup_head->len, PCI_DMA_TODEVICE);
		t = sp->mc_setup_head->next;
		kfree(sp->mc_setup_head);
		sp->mc_setup_head = t;
	}
	if (sp->mc_setup_head == NULL)
		sp->mc_setup_tail = NULL;

	sp->dirty_tx = dirty_tx;
}
1590 /* The interrupt handler does all of the Rx thread work and cleans up
1591 after the Tx thread. */
1592 static irqreturn_t speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1594 struct net_device *dev = (struct net_device *)dev_instance;
1595 struct speedo_private *sp;
1596 long ioaddr, boguscnt = max_interrupt_work;
1597 unsigned short status;
1598 unsigned int handled = 0;
1600 ioaddr = dev->base_addr;
1601 sp = netdev_priv(dev);
1603 #ifndef final_version
1604 /* A lock to prevent simultaneous entry on SMP machines. */
1605 if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
1606 printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
1607 dev->name);
1608 sp->in_interrupt = 0; /* Avoid halting machine. */
1609 return IRQ_NONE;
1611 #endif
1613 do {
1614 status = inw(ioaddr + SCBStatus);
1615 /* Acknowledge all of the current interrupt sources ASAP. */
1616 /* Will change from 0xfc00 to 0xff00 when we start handling
1617 FCP and ER interrupts --Dragan */
1618 outw(status & 0xfc00, ioaddr + SCBStatus);
1620 if (netif_msg_intr(sp))
1621 printk(KERN_DEBUG "%s: interrupt status=%#4.4x.\n",
1622 dev->name, status);
1624 if ((status & 0xfc00) == 0)
1625 break;
1626 handled = 1;
1629 if ((status & 0x5000) || /* Packet received, or Rx error. */
1630 (sp->rx_ring_state&(RrNoMem|RrPostponed)) == RrPostponed)
1631 /* Need to gather the postponed packet. */
1632 speedo_rx(dev);
1634 /* Always check if all rx buffers are allocated. --SAW */
1635 speedo_refill_rx_buffers(dev, 0);
1637 spin_lock(&sp->lock);
1638 /*
1639 * The chip may have suspended reception for various reasons.
1640 * Check for that, and re-prime it should this be the case.
1641 */
1642 switch ((status >> 2) & 0xf) {
1643 case 0: /* Idle */
1644 break;
1645 case 1: /* Suspended */
1646 case 2: /* No resources (RxFDs) */
1647 case 9: /* Suspended with no more RBDs */
1648 case 10: /* No resources due to no RBDs */
1649 case 12: /* Ready with no RBDs */
1650 speedo_rx_soft_reset(dev);
1651 break;
1652 case 3: case 5: case 6: case 7: case 8:
1653 case 11: case 13: case 14: case 15:
1654 /* these are all reserved values */
1655 break;
1656 }
1659 /* User interrupt, Command/Tx unit interrupt or CU not active. */
1660 if (status & 0xA400) {
1661 speedo_tx_buffer_gc(dev);
1662 if (sp->tx_full
1663 && (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL) {
1664 /* The ring is no longer full. */
1665 sp->tx_full = 0;
1666 netif_wake_queue(dev); /* Attention: under a spinlock. --SAW */
1667 }
1668 }
1670 spin_unlock(&sp->lock);
1672 if (--boguscnt < 0) {
1673 printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
1674 dev->name, status);
1675 /* Clear all interrupt sources. */
1676 /* Will change from 0xfc00 to 0xff00 when we start handling
1677 FCP and ER interrupts --Dragan */
1678 outw(0xfc00, ioaddr + SCBStatus);
1679 break;
1680 }
1681 } while (1);
1683 if (netif_msg_intr(sp))
1684 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1685 dev->name, inw(ioaddr + SCBStatus));
1687 clear_bit(0, (void*)&sp->in_interrupt);
1688 return IRQ_RETVAL(handled);
1689 }
1691 static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry)
1692 {
1693 struct speedo_private *sp = netdev_priv(dev);
1694 struct RxFD *rxf;
1695 struct sk_buff *skb;
1696 /* Get a fresh skbuff to replace the consumed one. */
1697 skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
1698 /* XXX: do we really want to call this before the NULL check? --hch */
1699 rx_align(skb); /* Align IP on 16 byte boundary */
1700 sp->rx_skbuff[entry] = skb;
1701 if (skb == NULL) {
1702 sp->rx_ringp[entry] = NULL;
1703 return NULL;
1704 }
1705 rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
1706 sp->rx_ring_dma[entry] =
1707 pci_map_single(sp->pdev, rxf,
1708 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1709 skb->dev = dev;
1710 skb_reserve(skb, sizeof(struct RxFD));
1711 rxf->rx_buf_addr = 0xffffffff;
1712 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
1713 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1714 return rxf;
1715 }
1717 static inline void speedo_rx_link(struct net_device *dev, int entry,
1718 struct RxFD *rxf, dma_addr_t rxf_dma)
1719 {
1720 struct speedo_private *sp = netdev_priv(dev);
1721 rxf->status = cpu_to_le32(0xC0000001); /* '1' for driver use only. */
1722 rxf->link = 0; /* None yet. */
1723 rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
1724 sp->last_rxf->link = cpu_to_le32(rxf_dma);
1725 sp->last_rxf->status &= cpu_to_le32(~0xC0000000);
1726 pci_dma_sync_single_for_device(sp->pdev, sp->last_rxf_dma,
1727 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1728 sp->last_rxf = rxf;
1729 sp->last_rxf_dma = rxf_dma;
1730 }
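/* Editor's note: speedo_rx_link() above implements the usual i8255x
 * tail-append protocol.  The new descriptor is first written with
 * 0xC0000001 -- apparently the end-of-list and suspend bits plus a
 * driver-private marker -- and only then are those bits cleared on the
 * old tail, so the receive unit always stops at a fully initialized
 * descriptor no matter when it samples the list.
 */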
1732 static int speedo_refill_rx_buf(struct net_device *dev, int force)
1733 {
1734 struct speedo_private *sp = netdev_priv(dev);
1735 int entry;
1736 struct RxFD *rxf;
1738 entry = sp->dirty_rx % RX_RING_SIZE;
1739 if (sp->rx_skbuff[entry] == NULL) {
1740 rxf = speedo_rx_alloc(dev, entry);
1741 if (rxf == NULL) {
1742 unsigned int forw;
1743 int forw_entry;
1744 if (netif_msg_rx_err(sp) || !(sp->rx_ring_state & RrOOMReported)) {
1745 printk(KERN_WARNING "%s: can't fill rx buffer (force %d)!\n",
1746 dev->name, force);
1747 sp->rx_ring_state |= RrOOMReported;
1748 }
1749 speedo_show_state(dev);
1750 if (!force)
1751 return -1; /* Better luck next time! */
1752 /* Borrow an skb from one of next entries. */
1753 for (forw = sp->dirty_rx + 1; forw != sp->cur_rx; forw++)
1754 if (sp->rx_skbuff[forw % RX_RING_SIZE] != NULL)
1755 break;
1756 if (forw == sp->cur_rx)
1757 return -1;
1758 forw_entry = forw % RX_RING_SIZE;
1759 sp->rx_skbuff[entry] = sp->rx_skbuff[forw_entry];
1760 sp->rx_skbuff[forw_entry] = NULL;
1761 rxf = sp->rx_ringp[forw_entry];
1762 sp->rx_ringp[forw_entry] = NULL;
1763 sp->rx_ringp[entry] = rxf;
1764 }
1765 } else {
1766 rxf = sp->rx_ringp[entry];
1767 }
1768 speedo_rx_link(dev, entry, rxf, sp->rx_ring_dma[entry]);
1769 sp->dirty_rx++;
1770 sp->rx_ring_state &= ~(RrNoMem|RrOOMReported); /* Mark the progress. */
1771 return 0;
1772 }
1774 static void speedo_refill_rx_buffers(struct net_device *dev, int force)
1775 {
1776 struct speedo_private *sp = netdev_priv(dev);
1778 /* Refill the RX ring. */
1779 while ((int)(sp->cur_rx - sp->dirty_rx) > 0 &&
1780 speedo_refill_rx_buf(dev, force) != -1);
1781 }
1783 static int
1784 speedo_rx(struct net_device *dev)
1785 {
1786 struct speedo_private *sp = netdev_priv(dev);
1787 int entry = sp->cur_rx % RX_RING_SIZE;
1788 int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;
1789 int alloc_ok = 1;
1790 int npkts = 0;
1792 if (netif_msg_intr(sp))
1793 printk(KERN_DEBUG " In speedo_rx().\n");
1794 /* If we own the next entry, it's a new packet. Send it up. */
1795 while (sp->rx_ringp[entry] != NULL) {
1796 int status;
1797 int pkt_len;
1799 pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
1800 sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1801 status = le32_to_cpu(sp->rx_ringp[entry]->status);
1802 pkt_len = le32_to_cpu(sp->rx_ringp[entry]->count) & 0x3fff;
1804 if (!(status & RxComplete))
1805 break;
1807 if (--rx_work_limit < 0)
1808 break;
1810 /* Check for a rare out-of-memory case: the current buffer is
1811 the last buffer allocated in the RX ring. --SAW */
1812 if (sp->last_rxf == sp->rx_ringp[entry]) {
1813 /* Postpone the packet. It'll be reaped at an interrupt when this
1814 packet is no longer the last packet in the ring. */
1815 if (netif_msg_rx_err(sp))
1816 printk(KERN_DEBUG "%s: RX packet postponed!\n",
1817 dev->name);
1818 sp->rx_ring_state |= RrPostponed;
1819 break;
1820 }
1822 if (netif_msg_rx_status(sp))
1823 printk(KERN_DEBUG " speedo_rx() status %8.8x len %d.\n", status,
1824 pkt_len);
1825 if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) {
1826 if (status & RxErrTooBig)
1827 printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, "
1828 "status %8.8x!\n", dev->name, status);
1829 else if (! (status & RxOK)) {
1830 /* There was a fatal error. This *should* be impossible. */
1831 sp->stats.rx_errors++;
1832 printk(KERN_ERR "%s: Anomalous event in speedo_rx(), "
1833 "status %8.8x.\n",
1834 dev->name, status);
1835 }
1836 } else {
1837 struct sk_buff *skb;
1839 /* Check if the packet is long enough to just accept without
1840 copying to a properly sized skbuff. */
1841 if (pkt_len < rx_copybreak
1842 && (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
1843 skb->dev = dev;
1844 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1845 /* 'skb_put()' points to the start of sk_buff data area. */
1846 pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
1847 sizeof(struct RxFD) + pkt_len,
1848 PCI_DMA_FROMDEVICE);
1850 #if 1 || USE_IP_CSUM
1851 /* Packet is in one chunk -- we can copy + cksum. */
1852 eth_copy_and_sum(skb, sp->rx_skbuff[entry]->tail, pkt_len, 0);
1853 skb_put(skb, pkt_len);
1854 #else
1855 memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->tail,
1856 pkt_len);
1857 #endif
1858 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
1859 sizeof(struct RxFD) + pkt_len,
1860 PCI_DMA_FROMDEVICE);
1861 npkts++;
1862 } else {
1863 /* Pass up the already-filled skbuff. */
1864 skb = sp->rx_skbuff[entry];
1865 if (skb == NULL) {
1866 printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
1867 dev->name);
1868 break;
1869 }
1870 sp->rx_skbuff[entry] = NULL;
1871 skb_put(skb, pkt_len);
1872 npkts++;
1873 sp->rx_ringp[entry] = NULL;
1874 pci_unmap_single(sp->pdev, sp->rx_ring_dma[entry],
1875 PKT_BUF_SZ + sizeof(struct RxFD),
1876 PCI_DMA_FROMDEVICE);
1877 }
1878 skb->protocol = eth_type_trans(skb, dev);
1879 netif_rx(skb);
1880 dev->last_rx = jiffies;
1881 sp->stats.rx_packets++;
1882 sp->stats.rx_bytes += pkt_len;
1883 }
1884 entry = (++sp->cur_rx) % RX_RING_SIZE;
1885 sp->rx_ring_state &= ~RrPostponed;
1886 /* Refill the recently taken buffers.
1887 Do it one-by-one to handle traffic bursts better. */
1888 if (alloc_ok && speedo_refill_rx_buf(dev, 0) == -1)
1889 alloc_ok = 0;
1890 }
1892 /* Try hard to refill the recently taken buffers. */
1893 speedo_refill_rx_buffers(dev, 1);
1895 if (npkts)
1896 sp->last_rx_time = jiffies;
1898 return 0;
1899 }
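/* Editor's note: a distilled sketch of the rx_copybreak policy used in
 * speedo_rx() above, with hypothetical types and helpers (struct buf,
 * alloc_buf), not driver code.  Packets shorter than the threshold are
 * copied into a right-sized buffer so the full-size ring buffer can be
 * reused in place; larger packets surrender their ring buffer and the
 * caller must refill that slot later.
 */
#if 0
#include <stddef.h>
#include <string.h>

struct buf { unsigned char *data; size_t len; };

static struct buf *deliver(struct buf *ring_buf, size_t pkt_len,
			   size_t copybreak,
			   struct buf *(*alloc_buf)(size_t len))
{
	if (pkt_len < copybreak) {
		struct buf *small = alloc_buf(pkt_len);

		if (small != NULL) {
			memcpy(small->data, ring_buf->data, pkt_len);
			small->len = pkt_len;
			return small;		/* ring buffer stays in the ring */
		}
	}
	ring_buf->len = pkt_len;		/* hand the ring buffer itself up */
	return ring_buf;
}
#endif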
1901 static int
1902 speedo_close(struct net_device *dev)
1903 {
1904 long ioaddr = dev->base_addr;
1905 struct speedo_private *sp = netdev_priv(dev);
1906 int i;
1908 netdevice_stop(dev);
1909 netif_stop_queue(dev);
1911 if (netif_msg_ifdown(sp))
1912 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
1913 dev->name, inw(ioaddr + SCBStatus));
1915 /* Shut off the media monitoring timer. */
1916 del_timer_sync(&sp->timer);
1918 outw(SCBMaskAll, ioaddr + SCBCmd);
1920 /* Shutting down the chip nicely fails to disable flow control. So.. */
1921 outl(PortPartialReset, ioaddr + SCBPort);
1922 inl(ioaddr + SCBPort); /* flush posted write */
1923 /*
1924 * The chip requires a 10 microsecond quiet period. Wait here!
1925 */
1926 udelay(10);
1928 free_irq(dev->irq, dev);
1929 speedo_show_state(dev);
1931 /* Free all the skbuffs in the Rx and Tx queues. */
1932 for (i = 0; i < RX_RING_SIZE; i++) {
1933 struct sk_buff *skb = sp->rx_skbuff[i];
1934 sp->rx_skbuff[i] = NULL;
1935 /* Clear the Rx descriptors. */
1936 if (skb) {
1937 pci_unmap_single(sp->pdev,
1938 sp->rx_ring_dma[i],
1939 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1940 dev_kfree_skb(skb);
1941 }
1942 }
1944 for (i = 0; i < TX_RING_SIZE; i++) {
1945 struct sk_buff *skb = sp->tx_skbuff[i];
1946 sp->tx_skbuff[i] = NULL;
1947 /* Clear the Tx descriptors. */
1948 if (skb) {
1949 pci_unmap_single(sp->pdev,
1950 le32_to_cpu(sp->tx_ring[i].tx_buf_addr0),
1951 skb->len, PCI_DMA_TODEVICE);
1952 dev_kfree_skb(skb);
1953 }
1954 }
1956 /* Free multicast setting blocks. */
1957 for (i = 0; sp->mc_setup_head != NULL; i++) {
1958 struct speedo_mc_block *t;
1959 t = sp->mc_setup_head->next;
1960 kfree(sp->mc_setup_head);
1961 sp->mc_setup_head = t;
1962 }
1963 sp->mc_setup_tail = NULL;
1964 if (netif_msg_ifdown(sp))
1965 printk(KERN_DEBUG "%s: %d multicast blocks dropped.\n", dev->name, i);
1967 pci_set_power_state(sp->pdev, 2);
1969 return 0;
1970 }
1972 /* The Speedo-3 has an especially awkward and unusable method of getting
1973 statistics out of the chip. It takes an unpredictable length of time
1974 for the dump-stats command to complete. To avoid a busy-wait loop we
1975 update the stats with the previous dump results, and then trigger a
1976 new dump.
1978 Oh, and incoming frames are dropped while executing dump-stats!
1979 */
1980 static struct net_device_stats *
1981 speedo_get_stats(struct net_device *dev)
1982 {
1983 struct speedo_private *sp = netdev_priv(dev);
1984 long ioaddr = dev->base_addr;
1986 /* Update only if the previous dump finished. */
1987 if (sp->lstats->done_marker == le32_to_cpu(0xA007)) {
1988 sp->stats.tx_aborted_errors += le32_to_cpu(sp->lstats->tx_coll16_errs);
1989 sp->stats.tx_window_errors += le32_to_cpu(sp->lstats->tx_late_colls);
1990 sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_underruns);
1991 sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_lost_carrier);
1992 /*sp->stats.tx_deferred += le32_to_cpu(sp->lstats->tx_deferred);*/
1993 sp->stats.collisions += le32_to_cpu(sp->lstats->tx_total_colls);
1994 sp->stats.rx_crc_errors += le32_to_cpu(sp->lstats->rx_crc_errs);
1995 sp->stats.rx_frame_errors += le32_to_cpu(sp->lstats->rx_align_errs);
1996 sp->stats.rx_over_errors += le32_to_cpu(sp->lstats->rx_resource_errs);
1997 sp->stats.rx_fifo_errors += le32_to_cpu(sp->lstats->rx_overrun_errs);
1998 sp->stats.rx_length_errors += le32_to_cpu(sp->lstats->rx_runt_errs);
1999 sp->lstats->done_marker = 0x0000;
2000 if (netif_running(dev)) {
2001 unsigned long flags;
2002 /* Take a spinlock to make wait_for_cmd_done and sending the
2003 command atomic. --SAW */
2004 spin_lock_irqsave(&sp->lock, flags);
2005 wait_for_cmd_done(dev);
2006 outb(CUDumpStats, ioaddr + SCBCmd);
2007 spin_unlock_irqrestore(&sp->lock, flags);
2008 }
2009 }
2010 return &sp->stats;
2011 }
2013 static int netdev_ethtool_ioctl(struct net_device *dev, void __user *useraddr)
2014 {
2015 u32 ethcmd;
2016 struct speedo_private *sp = netdev_priv(dev);
2018 if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
2019 return -EFAULT;
2021 switch (ethcmd) {
2022 /* get driver-specific version/etc. info */
2023 case ETHTOOL_GDRVINFO: {
2024 struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
2025 strncpy(info.driver, "eepro100", sizeof(info.driver)-1);
2026 strncpy(info.version, version, sizeof(info.version)-1);
2027 if (sp && sp->pdev)
2028 strcpy(info.bus_info, pci_name(sp->pdev));
2029 if (copy_to_user(useraddr, &info, sizeof(info)))
2030 return -EFAULT;
2031 return 0;
2032 }
2034 /* get settings */
2035 case ETHTOOL_GSET: {
2036 struct ethtool_cmd ecmd = { ETHTOOL_GSET };
2037 spin_lock_irq(&sp->lock);
2038 mii_ethtool_gset(&sp->mii_if, &ecmd);
2039 spin_unlock_irq(&sp->lock);
2040 if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
2041 return -EFAULT;
2042 return 0;
2043 }
2044 /* set settings */
2045 case ETHTOOL_SSET: {
2046 int r;
2047 struct ethtool_cmd ecmd;
2048 if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
2049 return -EFAULT;
2050 spin_lock_irq(&sp->lock);
2051 r = mii_ethtool_sset(&sp->mii_if, &ecmd);
2052 spin_unlock_irq(&sp->lock);
2053 return r;
2054 }
2055 /* restart autonegotiation */
2056 case ETHTOOL_NWAY_RST: {
2057 return mii_nway_restart(&sp->mii_if);
2058 }
2059 /* get link status */
2060 case ETHTOOL_GLINK: {
2061 struct ethtool_value edata = {ETHTOOL_GLINK};
2062 edata.data = mii_link_ok(&sp->mii_if);
2063 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2064 return -EFAULT;
2065 return 0;
2066 }
2067 /* get message-level */
2068 case ETHTOOL_GMSGLVL: {
2069 struct ethtool_value edata = {ETHTOOL_GMSGLVL};
2070 edata.data = sp->msg_enable;
2071 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2072 return -EFAULT;
2073 return 0;
2074 }
2075 /* set message-level */
2076 case ETHTOOL_SMSGLVL: {
2077 struct ethtool_value edata;
2078 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2079 return -EFAULT;
2080 sp->msg_enable = edata.data;
2081 return 0;
2082 }
2084 }
2086 return -EOPNOTSUPP;
2087 }
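/* Editor's note: for reference, a minimal user-space caller for the
 * ETHTOOL_GLINK branch handled above.  A sketch with error handling
 * trimmed; link_up() is an illustrative name, not part of this driver.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int link_up(const char *ifname)
{
	struct ethtool_value edata = { .cmd = ETHTOOL_GLINK };
	struct ifreq ifr;
	int fd, up = -1;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&edata;	/* the kernel copies the result back */
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		up = edata.data ? 1 : 0;
	close(fd);
	return up;
}
#endif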
2089 static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2090 {
2091 struct speedo_private *sp = netdev_priv(dev);
2092 struct mii_ioctl_data *data = if_mii(rq);
2093 int phy = sp->phy[0] & 0x1f;
2094 int saved_acpi;
2095 int t;
2097 switch(cmd) {
2098 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
2099 data->phy_id = phy;
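/* Editor's note: there is no break here; the fall-through into
 * SIOCGMIIREG appears deliberate, so the register read below also
 * fills in val_out, matching the old MII ioctl convention. */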
2101 case SIOCGMIIREG: /* Read MII PHY register. */
2102 /* FIXME: these operations need to be serialized with MDIO
2103 access from the timeout handler.
2104 They are currently serialized only with MDIO access from the
2105 timer routine. 2000/05/09 SAW */
2106 saved_acpi = pci_set_power_state(sp->pdev, 0);
2107 t = del_timer_sync(&sp->timer);
2108 data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
2109 if (t)
2110 add_timer(&sp->timer); /* may be set to the past --SAW */
2111 pci_set_power_state(sp->pdev, saved_acpi);
2112 return 0;
2114 case SIOCSMIIREG: /* Write MII PHY register. */
2115 if (!capable(CAP_NET_ADMIN))
2116 return -EPERM;
2117 saved_acpi = pci_set_power_state(sp->pdev, 0);
2118 t = del_timer_sync(&sp->timer);
2119 mdio_write(dev, data->phy_id, data->reg_num, data->val_in);
2120 if (t)
2121 add_timer(&sp->timer); /* may be set to the past --SAW */
2122 pci_set_power_state(sp->pdev, saved_acpi);
2123 return 0;
2124 case SIOCETHTOOL:
2125 return netdev_ethtool_ioctl(dev, rq->ifr_data);
2126 default:
2127 return -EOPNOTSUPP;
2128 }
2129 }
2131 /* Set or clear the multicast filter for this adaptor.
2132 This is very ugly with Intel chips -- we usually have to execute an
2133 entire configuration command, plus process a multicast command.
2134 This is complicated. We must put a large configuration command and
2135 an arbitrarily-sized multicast command in the transmit list.
2136 To minimize the disruption -- the previous command might have already
2137 loaded the link -- we convert the current command block, normally a Tx
2138 command, into a no-op and link it to the new command.
2139 */
2140 static void set_rx_mode(struct net_device *dev)
2141 {
2142 struct speedo_private *sp = netdev_priv(dev);
2143 long ioaddr = dev->base_addr;
2144 struct descriptor *last_cmd;
2145 char new_rx_mode;
2146 unsigned long flags;
2147 int entry, i;
2149 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
2150 new_rx_mode = 3;
2151 } else if ((dev->flags & IFF_ALLMULTI) ||
2152 dev->mc_count > multicast_filter_limit) {
2153 new_rx_mode = 1;
2154 } else
2155 new_rx_mode = 0;
2157 if (netif_msg_rx_status(sp))
2158 printk(KERN_DEBUG "%s: set_rx_mode %d -> %d\n", dev->name,
2159 sp->rx_mode, new_rx_mode);
2161 if ((int)(sp->cur_tx - sp->dirty_tx) > TX_RING_SIZE - TX_MULTICAST_SIZE) {
2162 /* The Tx ring is full -- don't add anything! Hope the mode will be
2163 * set again later. */
2164 sp->rx_mode = -1;
2165 return;
2166 }
2168 if (new_rx_mode != sp->rx_mode) {
2169 u8 *config_cmd_data;
2171 spin_lock_irqsave(&sp->lock, flags);
2172 entry = sp->cur_tx++ % TX_RING_SIZE;
2173 last_cmd = sp->last_cmd;
2174 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
2176 sp->tx_skbuff[entry] = NULL; /* Redundant. */
2177 sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdConfigure);
2178 sp->tx_ring[entry].link =
2179 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2180 config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr;
2181 /* Construct a full CmdConfig frame. */
2182 memcpy(config_cmd_data, i82558_config_cmd, CONFIG_DATA_SIZE);
2183 config_cmd_data[1] = (txfifo << 4) | rxfifo;
2184 config_cmd_data[4] = rxdmacount;
2185 config_cmd_data[5] = txdmacount + 0x80;
2186 config_cmd_data[15] |= (new_rx_mode & 2) ? 1 : 0;
2187 /* 0x80 doesn't disable FC; 0x84 does.
2188 Disable Flow control since we are not ACK-ing any FC interrupts
2189 for now. --Dragan */
2190 config_cmd_data[19] = 0x84;
2191 config_cmd_data[19] |= sp->mii_if.full_duplex ? 0x40 : 0;
2192 config_cmd_data[21] = (new_rx_mode & 1) ? 0x0D : 0x05;
2193 if (sp->phy[0] & 0x8000) { /* Use the AUI port instead. */
2194 config_cmd_data[15] |= 0x80;
2195 config_cmd_data[8] = 0;
2196 }
2197 /* Trigger the command unit resume. */
2198 wait_for_cmd_done(dev);
2199 clear_suspend(last_cmd);
2200 outb(CUResume, ioaddr + SCBCmd);
2201 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2202 netif_stop_queue(dev);
2203 sp->tx_full = 1;
2204 }
2205 spin_unlock_irqrestore(&sp->lock, flags);
2206 }
2208 if (new_rx_mode == 0 && dev->mc_count < 4) {
2209 /* The simple case of 0-3 multicast list entries occurs often, and
2210 fits within one tx_ring[] entry. */
2211 struct dev_mc_list *mclist;
2212 u16 *setup_params, *eaddrs;
2214 spin_lock_irqsave(&sp->lock, flags);
2215 entry = sp->cur_tx++ % TX_RING_SIZE;
2216 last_cmd = sp->last_cmd;
2217 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
2219 sp->tx_skbuff[entry] = NULL;
2220 sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdMulticastList);
2221 sp->tx_ring[entry].link =
2222 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2223 sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */
2224 setup_params = (u16 *)&sp->tx_ring[entry].tx_desc_addr;
2225 *setup_params++ = cpu_to_le16(dev->mc_count*6);
2226 /* Fill in the multicast addresses. */
2227 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
2228 i++, mclist = mclist->next) {
2229 eaddrs = (u16 *)mclist->dmi_addr;
2230 *setup_params++ = *eaddrs++;
2231 *setup_params++ = *eaddrs++;
2232 *setup_params++ = *eaddrs++;
2233 }
2235 wait_for_cmd_done(dev);
2236 clear_suspend(last_cmd);
2237 /* Immediately trigger the command unit resume. */
2238 outb(CUResume, ioaddr + SCBCmd);
2240 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2241 netif_stop_queue(dev);
2242 sp->tx_full = 1;
2243 }
2244 spin_unlock_irqrestore(&sp->lock, flags);
2245 } else if (new_rx_mode == 0) {
2246 struct dev_mc_list *mclist;
2247 u16 *setup_params, *eaddrs;
2248 struct speedo_mc_block *mc_blk;
2249 struct descriptor *mc_setup_frm;
2250 int i;
2252 mc_blk = kmalloc(sizeof(*mc_blk) + 2 + multicast_filter_limit*6,
2253 GFP_ATOMIC);
2254 if (mc_blk == NULL) {
2255 printk(KERN_ERR "%s: Failed to allocate a setup frame.\n",
2256 dev->name);
2257 sp->rx_mode = -1; /* We failed, try again. */
2258 return;
2259 }
2260 mc_blk->next = NULL;
2261 mc_blk->len = 2 + multicast_filter_limit*6;
2262 mc_blk->frame_dma =
2263 pci_map_single(sp->pdev, &mc_blk->frame, mc_blk->len,
2264 PCI_DMA_TODEVICE);
2265 mc_setup_frm = &mc_blk->frame;
2267 /* Fill the setup frame. */
2268 if (netif_msg_ifup(sp))
2269 printk(KERN_DEBUG "%s: Constructing a setup frame at %p.\n",
2270 dev->name, mc_setup_frm);
2271 mc_setup_frm->cmd_status =
2272 cpu_to_le32(CmdSuspend | CmdIntr | CmdMulticastList);
2273 /* Link set below. */
2274 setup_params = (u16 *)&mc_setup_frm->params;
2275 *setup_params++ = cpu_to_le16(dev->mc_count*6);
2276 /* Fill in the multicast addresses. */
2277 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
2278 i++, mclist = mclist->next) {
2279 eaddrs = (u16 *)mclist->dmi_addr;
2280 *setup_params++ = *eaddrs++;
2281 *setup_params++ = *eaddrs++;
2282 *setup_params++ = *eaddrs++;
2283 }
2285 /* Disable interrupts while playing with the Tx Cmd list. */
2286 spin_lock_irqsave(&sp->lock, flags);
2288 if (sp->mc_setup_tail)
2289 sp->mc_setup_tail->next = mc_blk;
2290 else
2291 sp->mc_setup_head = mc_blk;
2292 sp->mc_setup_tail = mc_blk;
2293 mc_blk->tx = sp->cur_tx;
2295 entry = sp->cur_tx++ % TX_RING_SIZE;
2296 last_cmd = sp->last_cmd;
2297 sp->last_cmd = mc_setup_frm;
2299 /* Change the command to a NoOp, pointing to the CmdMulti command. */
2300 sp->tx_skbuff[entry] = NULL;
2301 sp->tx_ring[entry].status = cpu_to_le32(CmdNOp);
2302 sp->tx_ring[entry].link = cpu_to_le32(mc_blk->frame_dma);
2304 /* Set the link in the setup frame. */
2305 mc_setup_frm->link =
2306 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2308 pci_dma_sync_single_for_device(sp->pdev, mc_blk->frame_dma,
2309 mc_blk->len, PCI_DMA_TODEVICE);
2311 wait_for_cmd_done(dev);
2312 clear_suspend(last_cmd);
2313 /* Immediately trigger the command unit resume. */
2314 outb(CUResume, ioaddr + SCBCmd);
2316 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2317 netif_stop_queue(dev);
2318 sp->tx_full = 1;
2319 }
2320 spin_unlock_irqrestore(&sp->lock, flags);
2322 if (netif_msg_rx_status(sp))
2323 printk(" CmdMCSetup frame length %d in entry %d.\n",
2324 dev->mc_count, entry);
2325 }
2327 sp->rx_mode = new_rx_mode;
2328 }
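/* Editor's note: both multicast setup frames built above share one wire
 * layout: a 16-bit byte count (mc_count * 6) followed by the 6-byte
 * addresses packed as three 16-bit words each.  A standalone sketch with
 * hypothetical names follows; the driver additionally byte-swaps the
 * count with cpu_to_le16 for big-endian hosts.
 */
#if 0
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static size_t fill_mc_list(uint16_t *frame, const uint8_t (*addrs)[6],
			   unsigned int count)
{
	uint16_t *p = frame;
	unsigned int i;

	*p++ = (uint16_t)(count * 6);	/* byte count of the address list */
	for (i = 0; i < count; i++) {
		memcpy(p, addrs[i], 6);	/* one MAC == three u16 slots */
		p += 3;
	}
	return (size_t)((uint8_t *)p - (uint8_t *)frame);
}
#endif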
2330 #ifdef CONFIG_PM
2331 static int eepro100_suspend(struct pci_dev *pdev, u32 state)
2332 {
2333 struct net_device *dev = pci_get_drvdata (pdev);
2334 struct speedo_private *sp = netdev_priv(dev);
2335 long ioaddr = dev->base_addr;
2337 pci_save_state(pdev, sp->pm_state);
2339 if (!netif_running(dev))
2340 return 0;
2342 del_timer_sync(&sp->timer);
2344 netif_device_detach(dev);
2345 outl(PortPartialReset, ioaddr + SCBPort);
2347 /* XXX call pci_set_power_state ()? */
2348 return 0;
2349 }
2351 static int eepro100_resume(struct pci_dev *pdev)
2352 {
2353 struct net_device *dev = pci_get_drvdata (pdev);
2354 struct speedo_private *sp = netdev_priv(dev);
2355 long ioaddr = dev->base_addr;
2357 pci_restore_state(pdev, sp->pm_state);
2359 if (!netif_running(dev))
2360 return 0;
2362 /* I'm absolutely uncertain whether this part of the code works.
2363 The problems are:
2364 - correct hardware reinitialization;
2365 - correct driver behavior between different steps of the
2366 reinitialization;
2367 - serialization with other driver calls.
2368 2000/03/08 SAW */
2369 outw(SCBMaskAll, ioaddr + SCBCmd);
2370 speedo_resume(dev);
2371 netif_device_attach(dev);
2372 sp->rx_mode = -1;
2373 sp->flow_ctrl = sp->partner = 0;
2374 set_rx_mode(dev);
2375 sp->timer.expires = RUN_AT(2*HZ);
2376 add_timer(&sp->timer);
2377 return 0;
2378 }
2379 #endif /* CONFIG_PM */
2381 static void __devexit eepro100_remove_one (struct pci_dev *pdev)
2382 {
2383 struct net_device *dev = pci_get_drvdata (pdev);
2384 struct speedo_private *sp = netdev_priv(dev);
2386 unregister_netdev(dev);
2388 release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
2389 release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
2391 #ifndef USE_IO
2392 iounmap((char *)dev->base_addr);
2393 #endif
2395 pci_free_consistent(pdev, TX_RING_SIZE * sizeof(struct TxFD)
2396 + sizeof(struct speedo_stats),
2397 sp->tx_ring, sp->tx_ring_dma);
2398 pci_disable_device(pdev);
2399 free_netdev(dev);
2400 }
2402 static struct pci_device_id eepro100_pci_tbl[] = {
2403 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82557,
2404 PCI_ANY_ID, PCI_ANY_ID, },
2405 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82559ER,
2406 PCI_ANY_ID, PCI_ANY_ID, },
2407 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_7,
2408 PCI_ANY_ID, PCI_ANY_ID, },
2409 { PCI_VENDOR_ID_INTEL, 0x1029, PCI_ANY_ID, PCI_ANY_ID, },
2410 { PCI_VENDOR_ID_INTEL, 0x1030, PCI_ANY_ID, PCI_ANY_ID, },
2411 { PCI_VENDOR_ID_INTEL, 0x1031, PCI_ANY_ID, PCI_ANY_ID, },
2412 { PCI_VENDOR_ID_INTEL, 0x1032, PCI_ANY_ID, PCI_ANY_ID, },
2413 { PCI_VENDOR_ID_INTEL, 0x1033, PCI_ANY_ID, PCI_ANY_ID, },
2414 { PCI_VENDOR_ID_INTEL, 0x1034, PCI_ANY_ID, PCI_ANY_ID, },
2415 { PCI_VENDOR_ID_INTEL, 0x1035, PCI_ANY_ID, PCI_ANY_ID, },
2416 { PCI_VENDOR_ID_INTEL, 0x1036, PCI_ANY_ID, PCI_ANY_ID, },
2417 { PCI_VENDOR_ID_INTEL, 0x1037, PCI_ANY_ID, PCI_ANY_ID, },
2418 { PCI_VENDOR_ID_INTEL, 0x1038, PCI_ANY_ID, PCI_ANY_ID, },
2419 { PCI_VENDOR_ID_INTEL, 0x1039, PCI_ANY_ID, PCI_ANY_ID, },
2420 { PCI_VENDOR_ID_INTEL, 0x103A, PCI_ANY_ID, PCI_ANY_ID, },
2421 { PCI_VENDOR_ID_INTEL, 0x103B, PCI_ANY_ID, PCI_ANY_ID, },
2422 { PCI_VENDOR_ID_INTEL, 0x103C, PCI_ANY_ID, PCI_ANY_ID, },
2423 { PCI_VENDOR_ID_INTEL, 0x103D, PCI_ANY_ID, PCI_ANY_ID, },
2424 { PCI_VENDOR_ID_INTEL, 0x103E, PCI_ANY_ID, PCI_ANY_ID, },
2425 { PCI_VENDOR_ID_INTEL, 0x1050, PCI_ANY_ID, PCI_ANY_ID, },
2426 { PCI_VENDOR_ID_INTEL, 0x1059, PCI_ANY_ID, PCI_ANY_ID, },
2427 { PCI_VENDOR_ID_INTEL, 0x1227, PCI_ANY_ID, PCI_ANY_ID, },
2428 { PCI_VENDOR_ID_INTEL, 0x1228, PCI_ANY_ID, PCI_ANY_ID, },
2429 { PCI_VENDOR_ID_INTEL, 0x2449, PCI_ANY_ID, PCI_ANY_ID, },
2430 { PCI_VENDOR_ID_INTEL, 0x2459, PCI_ANY_ID, PCI_ANY_ID, },
2431 { PCI_VENDOR_ID_INTEL, 0x245D, PCI_ANY_ID, PCI_ANY_ID, },
2432 { PCI_VENDOR_ID_INTEL, 0x5200, PCI_ANY_ID, PCI_ANY_ID, },
2433 { PCI_VENDOR_ID_INTEL, 0x5201, PCI_ANY_ID, PCI_ANY_ID, },
2434 { 0,}
2435 };
2436 MODULE_DEVICE_TABLE(pci, eepro100_pci_tbl);
2438 static struct pci_driver eepro100_driver = {
2439 .name = "eepro100",
2440 .id_table = eepro100_pci_tbl,
2441 .probe = eepro100_init_one,
2442 .remove = __devexit_p(eepro100_remove_one),
2443 #ifdef CONFIG_PM
2444 .suspend = eepro100_suspend,
2445 .resume = eepro100_resume,
2446 #endif /* CONFIG_PM */
2447 };
2449 static int __init eepro100_init_module(void)
2450 {
2451 #ifdef MODULE
2452 printk(version);
2453 #endif
2454 return pci_module_init(&eepro100_driver);
2455 }
2457 static void __exit eepro100_cleanup_module(void)
2459 pci_unregister_driver(&eepro100_driver);
2460 }
2462 module_init(eepro100_init_module);
2463 module_exit(eepro100_cleanup_module);
2465 /*
2466 * Local variables:
2467 * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
2468 * c-indent-level: 4
2469 * c-basic-offset: 4
2470 * tab-width: 4
2471 * End:
2472 */