/* drivers/net/eepro100.c: An Intel i82557-559 Ethernet driver for Linux. */
/*
	Written 1996-1999 by Donald Becker.

	The driver also contains updates by different kernel developers
	(see incomplete list below).
	Current maintainer is Andrey V. Savochkin <saw@saw.sw.com.sg>.
	Please use this email address and linux-kernel mailing list for bug reports.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	This driver is for the Intel EtherExpress Pro100 (Speedo3) design.
	It should work with all i82557/558/559 boards.

	Version history:
	1998 Apr - 2000 Feb  Andrey V. Savochkin <saw@saw.sw.com.sg>
		Serious fixes for multicast filter list setting, TX timeout routine;
		RX ring refilling logic;  other stuff
	2000 Feb  Jeff Garzik <jgarzik@pobox.com>
		Convert to new PCI driver interface
	2000 Mar 24  Dragan Stancevic <visitor@valinux.com>
		Disabled FC and ER, to avoid lockups when we get FCP interrupts.
	2000 Jul 17  Goutham Rao <goutham.rao@intel.com>
		PCI DMA API fixes, adding pci_dma_sync_single calls where necessary
	2000 Aug 31  David Mosberger <davidm@hpl.hp.com>
		rx_align support: enables rx DMA without causing unaligned accesses.
*/

static const char *version =
"eepro100.c:v1.09j-t 9/29/99 Donald Becker http://www.scyld.com/network/eepro100.html\n"
"eepro100.c: $Revision: 1.36 $ 2000/11/17 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n";
/* A few user-configurable values that apply to all boards.
   First set is undocumented and spelled per Intel recommendations. */

static int congenb /* = 0 */;	/* Enable congestion control in the DP83840. */
static int txfifo = 8;		/* Tx FIFO threshold in 4 byte units, 0-15 */
static int rxfifo = 8;		/* Rx FIFO threshold, default 32 bytes. */
/* Tx/Rx DMA burst length, 0-127, 0 == no preemption, tx==128 -> disabled. */
static int txdmacount = 128;
static int rxdmacount /* = 0 */;
#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) || defined(__mips__) || \
	defined(__arm__)
  /* align rx buffers to 2 bytes so that IP header is aligned */
# define rx_align(skb)		skb_reserve((skb), 2)
# define RxFD_ALIGNMENT		__attribute__ ((aligned (2), packed))
#else
# define rx_align(skb)
# define RxFD_ALIGNMENT
#endif
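/* Explanatory note: on the architectures listed above, unaligned 32-bit
   loads fault or are expensive, so the 2-byte skb_reserve() offsets the
   14-byte Ethernet header such that the IP header that follows lands on a
   4-byte boundary.  The RxFD kept at the head of the buffer must then
   tolerate 2-byte alignment, hence the packed/aligned(2) attribute. */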
/* Set the copy breakpoint for the copy-only-tiny-buffer Rx method.
   Lower values use more memory, but are faster. */
static int rx_copybreak = 200;

/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast) */
static int multicast_filter_limit = 64;

/* 'options' is used to pass a transceiver override or full-duplex flag
   e.g. "options=16" for FD, "options=32" for 100mbps-only. */
static int full_duplex[] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1};
/* A few values that may be tweaked. */
/* The ring sizes should be a power of two for efficiency. */
#define TX_RING_SIZE	64
#define RX_RING_SIZE	64
/* How many slots the multicast filter setup may take.
   Do not decrease without changing the set_rx_mode() implementation. */
#define TX_MULTICAST_SIZE   2
#define TX_MULTICAST_RESERV (TX_MULTICAST_SIZE*2)
/* Actual number of TX packets queued, must be
   <= TX_RING_SIZE-TX_MULTICAST_RESERV. */
#define TX_QUEUE_LIMIT  (TX_RING_SIZE-TX_MULTICAST_RESERV)
/* Hysteresis marking queue as no longer full. */
#define TX_QUEUE_UNFULL (TX_QUEUE_LIMIT-4)
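/* With the default TX_RING_SIZE of 64 this works out to: the queue stops at
   60 outstanding packets and is re-woken once it drains below 56, leaving
   4 ring slots permanently reserved for multicast setup frames. */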
/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT		(2*HZ)
/* Size of a pre-allocated Rx buffer: <Ethernet MTU> + slack.*/
#define PKT_BUF_SZ		1536

#if !defined(__OPTIMIZE__)  ||  !defined(__KERNEL__)
#warning  You must compile this file with the correct options!
#warning  See the last lines of the source file.
#error You must compile this driver with "-O".
#endif
#include <linux/config.h>
#include <linux/version.h>
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/delay.h>

#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/uaccess.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
/* enable PIO instead of MMIO, if CONFIG_EEPRO100_PIO is selected */
#ifdef CONFIG_EEPRO100_PIO
#define USE_IO 1
#endif

static int debug = -1;
#define DEBUG_DEFAULT		(NETIF_MSG_DRV		| \
				 NETIF_MSG_HW		| \
				 NETIF_MSG_RX_ERR	| \
				 NETIF_MSG_TX_ERR)
#define DEBUG			((debug >= 0) ? (1<<debug)-1 : DEBUG_DEFAULT)
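/* A non-negative "debug" level n expands to a mask of the n lowest
   NETIF_MSG_* bits ((1<<n)-1); a negative level falls back to
   DEBUG_DEFAULT.  The result seeds sp->msg_enable, which the
   netif_msg_*() tests below key off. */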
MODULE_AUTHOR("Maintainer: Andrey V. Savochkin <saw@saw.sw.com.sg>");
MODULE_DESCRIPTION("Intel i82557/i82558/i82559 PCI EtherExpressPro driver");
MODULE_LICENSE("GPL");
MODULE_PARM(debug, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(congenb, "i");
MODULE_PARM(txfifo, "i");
MODULE_PARM(rxfifo, "i");
MODULE_PARM(txdmacount, "i");
MODULE_PARM(rxdmacount, "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(multicast_filter_limit, "i");
MODULE_PARM_DESC(debug, "debug level (0-6)");
MODULE_PARM_DESC(options, "Bits 0-3: transceiver type, bit 4: full duplex, bit 5: 100Mbps");
MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)");
MODULE_PARM_DESC(congenb, "Enable congestion control (1)");
MODULE_PARM_DESC(txfifo, "Tx FIFO threshold in 4 byte units, (0-15)");
MODULE_PARM_DESC(rxfifo, "Rx FIFO threshold in 4 byte units, (0-15)");
MODULE_PARM_DESC(txdmacount, "Tx DMA burst length; 128 - disable (0-128)");
MODULE_PARM_DESC(rxdmacount, "Rx DMA burst length; 128 - disable (0-128)");
MODULE_PARM_DESC(rx_copybreak, "copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(max_interrupt_work, "maximum events handled per interrupt");
MODULE_PARM_DESC(multicast_filter_limit, "maximum number of filtered multicast addresses");
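/* Usage sketch (values are illustrative only): the array parameters take
   one comma-separated value per card, so loading the module for two boards
   forced to 100 Mbps full-duplex might look like
	modprobe eepro100 debug=2 options=0x30,0x30 full_duplex=1,1
   with options encoded per the bit layout described above. */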
#define RUN_AT(x) (jiffies + (x))

/* ACPI power states don't universally work (yet) */
#ifndef CONFIG_PM
#undef pci_set_power_state
#define pci_set_power_state null_set_power_state
static inline int null_set_power_state(struct pci_dev *dev, int state)
{
	return 0;
}
#endif /* CONFIG_PM */

#define netdevice_start(dev)
#define netdevice_stop(dev)
#define netif_set_tx_timeout(dev, tf, tm) \
	do { \
		(dev)->tx_timeout = (tf); \
		(dev)->watchdog_timeo = (tm); \
	} while(0)
/*
				Theory of Operation

I. Board Compatibility

This device driver is designed for the Intel i82557 "Speedo3" chip, Intel's
single-chip fast Ethernet controller for PCI, as used on the Intel
EtherExpress Pro 100 adapter.

II. Board-specific settings

PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board.  The system BIOS should be set to assign the
PCI INTA signal to an otherwise unused system IRQ line.  While it's
possible to share PCI interrupt lines, it negatively impacts performance and
only recent kernels support it.

III. Driver operation

IIIA. General
The Speedo3 is very similar to other Intel network chips, that is to say
"apparently designed on a different planet".  This chip retains the complex
Rx and Tx descriptors and multiple buffer pointers as previous chips, but
also has simplified Tx and Rx buffer modes.  This driver uses the "flexible"
Tx mode, but in a simplified lower-overhead manner: it associates only a
single buffer descriptor with each frame descriptor.

Despite the extra space overhead in each receive skbuff, the driver must use
the simplified Rx buffer mode to assure that only a single data buffer is
associated with each RxFD.  The driver implements this by reserving space
for the Rx descriptor at the head of each Rx skbuff.

The Speedo-3 has receive and command unit base addresses that are added to
almost all descriptor pointers.  The driver sets these to zero, so that all
pointer fields are absolute addresses.

The System Control Block (SCB) of some previous Intel chips exists on the
chip in both PCI I/O and memory space.  This driver uses the I/O space
registers, but might switch to memory mapped mode to better support non-x86
processors.

IIIB. Transmit structure

The driver must use the complex Tx command+descriptor mode in order to
have an indirect pointer to the skbuff data section.  Each Tx command block
(TxCB) is associated with two immediately appended Tx Buffer Descriptors
(TxBDs).  A fixed ring of these TxCB+TxBD pairs is kept as part of the
speedo_private data structure for each adapter instance.

The newer i82558 explicitly supports this structure, and can read the two
TxBDs in the same PCI burst as the TxCB.

This ring structure is used for all normal transmit packets, but the
transmit packet descriptors aren't long enough for most non-Tx commands such
as CmdConfigure.  This is complicated by the possibility that the chip has
already loaded the link address in the previous descriptor.  So for these
commands we convert the next free descriptor on the ring to a NoOp, and point
that descriptor's link to the complex command.

An additional complexity of these non-transmit commands is that they may be
added asynchronously to the normal transmit queue, so we disable interrupts
whenever the Tx descriptor ring is manipulated.

A notable aspect of these special configure commands is that they do
work with the normal Tx ring entry scavenge method.  The Tx ring scavenge
is done at interrupt time using the 'dirty_tx' index, and checking for the
command-complete bit.  While the setup frames may have the NoOp command on the
Tx ring marked as complete without having completed the setup command, this
is not a problem.  The tx_ring entry can still be safely reused, as the
tx_skbuff[] entry is always empty for config_cmd and mc_setup frames.

Commands may have bits set e.g. CmdSuspend in the command word to either
suspend or stop the transmit/command unit.  This driver always flags the last
command with CmdSuspend, erases the CmdSuspend in the previous command, and
then issues a CU_RESUME.
Note: Watch out for the potential race condition here: imagine
	erasing the previous suspend
		the chip processes the previous command
		the chip processes the final command, and suspends
	doing the CU_RESUME
		the chip processes the next-yet-valid post-final-command.
So blindly sending a CU_RESUME is only safe if we do it immediately after
erasing the previous CmdSuspend, without the possibility of an intervening
delay.  Thus the resume command is always within the interrupts-disabled
region.  This is a timing dependence, but handling this condition in a
timing-independent way would considerably complicate the code.

Note: In previous generation Intel chips, restarting the command unit was a
notoriously slow process.  This is presumably no longer true.

IIIC. Receive structure

Because of the bus-master support on the Speedo3 this driver uses the new
SKBUFF_RX_COPYBREAK scheme, rather than a fixed intermediate receive buffer.
This scheme allocates full-sized skbuffs as receive buffers.  The value
SKBUFF_RX_COPYBREAK is used as the copying breakpoint: it is chosen to
trade off the memory wasted by passing the full-sized skbuff to the queue
layer for all frames vs. the copying cost of copying a frame to a
correctly-sized skbuff.

For small frames the copying cost is negligible (esp. considering that we
are pre-loading the cache with immediately useful header information), so we
allocate a new, minimally-sized skbuff.  For large frames the copying cost
is non-trivial, and the larger copy might flush the cache of useful data, so
we pass up the skbuff the packet was received into.

IV. Notes

Thanks to Steve Williams of Intel for arranging the non-disclosure agreement
that stated that I could disclose the information.  But I still resent
having to sign an Intel NDA when I'm helping Intel sell their own product!
*/
static int speedo_found1(struct pci_dev *pdev, long ioaddr, int fnd_cnt, int acpi_idle_state);

enum pci_flags_bit {
	PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
	PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
};
static inline unsigned int io_inw(unsigned long port)
{
	return inw(port);
}
static inline void io_outw(unsigned int val, unsigned long port)
{
	outw(val, port);
}
#ifndef USE_IO
/* Currently alpha headers define in/out macros.
   Undefine them.  2000/03/30  SAW */
#undef inb
#undef inw
#undef inl
#undef outb
#undef outw
#undef outl
#define inb readb
#define inw readw
#define inl readl
#define outb writeb
#define outw writew
#define outl writel
#endif
/* Offsets to the various registers.
   All accesses need not be longword aligned. */
enum speedo_offsets {
	SCBStatus = 0, SCBCmd = 2,	/* Rx/Command Unit command and status. */
	SCBIntmask = 3,
	SCBPointer = 4,			/* General purpose pointer. */
	SCBPort = 8,			/* Misc. commands and operands.  */
	SCBflash = 12, SCBeeprom = 14,	/* EEPROM and flash memory control. */
	SCBCtrlMDI = 16,		/* MDI interface control. */
	SCBEarlyRx = 20,		/* Early receive byte count. */
};
/* Commands that can be put in a command list entry. */
enum commands {
	CmdNOp = 0, CmdIASetup = 0x10000, CmdConfigure = 0x20000,
	CmdMulticastList = 0x30000, CmdTx = 0x40000, CmdTDR = 0x50000,
	CmdDump = 0x60000, CmdDiagnose = 0x70000,
	CmdSuspend = 0x40000000,	/* Suspend after completion. */
	CmdIntr = 0x20000000,		/* Interrupt after completion. */
	CmdTxFlex = 0x00080000,		/* Use "Flexible mode" for CmdTx command. */
};
/* Clear CmdSuspend (1<<30) avoiding interference with the card access to the
   status bits.  Previous driver versions used separate 16 bit fields for
   commands and statuses.  --SAW
 */
#if defined(__alpha__)
# define clear_suspend(cmd)	clear_bit(30, &(cmd)->cmd_status);
#else
# if defined(__LITTLE_ENDIAN)
#  define clear_suspend(cmd)	((__u16 *)&(cmd)->cmd_status)[1] &= ~0x4000
# elif defined(__BIG_ENDIAN)
#  define clear_suspend(cmd)	((__u16 *)&(cmd)->cmd_status)[1] &= ~0x0040
# else
#  error Unsupported byteorder
# endif
#endif
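/* Why the masks differ: cmd_status is stored little-endian, so bit 30 of
   the logical value lives in bit 6 of byte 3.  Halfword [1] covers bytes
   2-3; read as a little-endian u16 that is bit 14 (0x4000), read
   big-endian it is bit 6 (0x0040).  Alpha presumably takes the atomic
   clear_bit() path because older Alphas cannot issue independent 16-bit
   stores. */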
enum SCBCmdBits {
	SCBMaskCmdDone=0x8000, SCBMaskRxDone=0x4000, SCBMaskCmdIdle=0x2000,
	SCBMaskRxSuspend=0x1000, SCBMaskEarlyRx=0x0800, SCBMaskFlowCtl=0x0400,
	SCBTriggerIntr=0x0200, SCBMaskAll=0x0100,
	/* The rest are Rx and Tx commands. */
	CUStart=0x0010, CUResume=0x0020, CUStatsAddr=0x0040, CUShowStats=0x0050,
	CUCmdBase=0x0060,	/* CU Base address (set to zero) . */
	CUDumpStats=0x0070,	/* Dump then reset stats counters. */
	RxStart=0x0001, RxResume=0x0002, RxAbort=0x0004, RxAddrLoad=0x0006,
	RxResumeNoResources=0x0007,
};

enum SCBPort_cmds {
	PortReset=0, PortSelfTest=1, PortPartialReset=2, PortDump=3,
};
/* The Speedo3 Rx and Tx frame/buffer descriptors. */
struct descriptor {			/* A generic descriptor. */
	volatile s32 cmd_status;	/* All command and status fields. */
	u32 link;			/* struct descriptor *  */
	unsigned char params[0];
};

/* The Speedo3 Rx and Tx buffer descriptors. */
struct RxFD {				/* Receive frame descriptor. */
	volatile s32 status;
	u32 link;			/* struct RxFD * */
	u32 rx_buf_addr;		/* void * */
	u32 count;
} RxFD_ALIGNMENT;
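/* Each RxFD is carved out of the head of its receive skbuff (see
   speedo_init_rx_ring()/speedo_rx_alloc() below), so a single DMA mapping
   covers both the descriptor and the packet data that follows it. */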
/* Selected elements of the Tx/RxFD.status word. */
enum RxFD_bits {
	RxComplete=0x8000, RxOK=0x2000,
	RxErrCRC=0x0800, RxErrAlign=0x0400, RxErrTooBig=0x0200, RxErrSymbol=0x0010,
	RxEth2Type=0x0020, RxNoMatch=0x0004, RxNoIAMatch=0x0002,
	TxUnderrun=0x1000, StatusComplete=0x8000,
};
#define CONFIG_DATA_SIZE 22
struct TxFD {				/* Transmit frame descriptor set. */
	s32 status;
	u32 link;			/* void * */
	u32 tx_desc_addr;		/* Always points to the tx_buf_addr element. */
	s32 count;			/* # of TBD (=1), Tx start thresh., etc. */
	/* This constitutes two "TBD" entries -- we only use one. */
#define TX_DESCR_BUF_OFFSET 16
	u32 tx_buf_addr0;		/* void *, frame to be transmitted.  */
	s32 tx_buf_size0;		/* Length of Tx frame. */
	u32 tx_buf_addr1;		/* void *, frame to be transmitted.  */
	s32 tx_buf_size1;		/* Length of Tx frame. */
	/* the structure must have space for at least CONFIG_DATA_SIZE starting
	 * from tx_desc_addr field */
};
/* Multicast filter setting block.  --SAW */
struct speedo_mc_block {
	struct speedo_mc_block *next;
	unsigned int tx;
	dma_addr_t frame_dma;
	unsigned int len;
	struct descriptor frame __attribute__ ((__aligned__(16)));
};
/* Elements of the dump_statistics block. This block must be lword aligned. */
struct speedo_stats {
	u32 tx_good_frames;
	u32 tx_coll16_errs;
	u32 tx_late_colls;
	u32 tx_underruns;
	u32 tx_lost_carrier;
	u32 tx_deferred;
	u32 tx_one_colls;
	u32 tx_multi_colls;
	u32 tx_total_colls;
	u32 rx_good_frames;
	u32 rx_crc_errs;
	u32 rx_align_errs;
	u32 rx_resource_errs;
	u32 rx_overrun_errs;
	u32 rx_colls_errs;
	u32 rx_runt_errs;
	u32 done_marker;
};

enum Rx_ring_state_bits {
	RrNoMem=1, RrPostponed=2, RrNoResources=4, RrOOMReported=8,
};
/* Do not change the position (alignment) of the first few elements!
   The later elements are grouped for cache locality.

   Unfortunately, all the positions have been shifted since then.
   A new re-alignment is required.  2000/03/06  SAW */
struct speedo_private {
	struct TxFD	*tx_ring;		/* Commands (usually CmdTxPacket). */
	struct RxFD	*rx_ringp[RX_RING_SIZE];  /* Rx descriptor, used as ring. */
	/* The addresses of the Tx/Rx-in-place packets/buffers. */
	struct sk_buff	*tx_skbuff[TX_RING_SIZE];
	struct sk_buff	*rx_skbuff[RX_RING_SIZE];
	/* Mapped addresses of the rings. */
	dma_addr_t	tx_ring_dma;
#define TX_RING_ELEM_DMA(sp, n) ((sp)->tx_ring_dma + (n)*sizeof(struct TxFD))
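/* Bus address of the n-th TxFD: the ring is one contiguous
   pci_alloc_consistent() allocation, and index TX_RING_SIZE addresses the
   statistics block placed immediately after the ring (see lstats_dma
   setup in speedo_found1()). */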
	dma_addr_t	rx_ring_dma[RX_RING_SIZE];
	struct descriptor *last_cmd;		/* Last command sent. */
	unsigned int	cur_tx, dirty_tx;	/* The ring entries to be free()ed. */
	spinlock_t	lock;			/* Group with Tx control cache line. */
	u32		tx_threshold;		/* The value for txdesc.count. */
	struct RxFD	*last_rxf;		/* Last filled RX buffer. */
	dma_addr_t	last_rxf_dma;
	unsigned int	cur_rx, dirty_rx;	/* The next free ring entry */
	long		last_rx_time;		/* Last Rx, in jiffies, to handle Rx hang. */
	struct net_device_stats stats;
	struct speedo_stats	*lstats;
	dma_addr_t	lstats_dma;
	int		chip_id;
	struct pci_dev	*pdev;
	struct timer_list timer;		/* Media selection timer. */
	struct speedo_mc_block *mc_setup_head;	/* Multicast setup frame list head. */
	struct speedo_mc_block *mc_setup_tail;	/* Multicast setup frame list tail. */
	long		in_interrupt;		/* Word-aligned dev->interrupt */
	unsigned char	acpi_pwr;
	signed char	rx_mode;		/* Current PROMISC/ALLMULTI setting. */
	unsigned int	tx_full:1;		/* The Tx queue is full. */
	unsigned int	flow_ctrl:1;		/* Use 802.3x flow control. */
	unsigned int	rx_bug:1;		/* Work around receiver hang errata. */
	unsigned char	default_port:8;		/* Last dev->if_port value. */
	unsigned char	rx_ring_state;		/* RX ring status flags. */
	unsigned short	phy[2];			/* PHY media interfaces available. */
	unsigned short	partner;		/* Link partner caps. */
	struct mii_if_info mii_if;		/* MII API hooks, info */
	u32		msg_enable;		/* debug message level */
#ifdef CONFIG_PM
	u32		pm_state[16];
#endif
};
/* The parameters for a CmdConfigure operation.
   There are so many options that it would be difficult to document each bit.
   We mostly use the default or recommended settings. */
static const char i82557_config_cmd[CONFIG_DATA_SIZE] = {
	22, 0x08, 0, 0,  0, 0, 0x32, 0x03,  1, /* 1=Use MII  0=Use AUI */
	0, 0x2E, 0,  0x60, 0,
	0xf2, 0x48,   0, 0x40, 0xf2, 0x80,	/* 0x40=Force full-duplex */
	0x3f, 0x05, };
static const char i82558_config_cmd[CONFIG_DATA_SIZE] = {
	22, 0x08, 0, 1,  0, 0, 0x22, 0x03,  1, /* 1=Use MII  0=Use AUI */
	0, 0x2E, 0,  0x60, 0x08, 0x88,
	0x68, 0, 0x40, 0xf2, 0x84,		/* Disable FC */
	0x31, 0x05, };
/* PHY media interface chips. */
static const char *phys[] = {
	"None", "i82553-A/B", "i82553-C", "i82503",
	"DP83840", "80c240", "80c24", "i82555",
	"unknown-8", "unknown-9", "DP83840A", "unknown-11",
	"unknown-12", "unknown-13", "unknown-14", "unknown-15", };
enum phy_chips { NonSuchPhy=0, I82553AB, I82553C, I82503, DP83840, S80C240,
		 S80C24, I82555, DP83840A=10, };
static const char is_mii[] = { 0, 1, 1, 0, 1, 1, 0, 1 };
#define EE_READ_CMD		(6)
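/* 6 (binary 110) is the standard READ instruction of 93Cxx-style serial
   EEPROMs: a start bit followed by the 10 opcode.  do_eeprom_cmd() shifts
   it out MSB-first ahead of the word address. */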
static int eepro100_init_one(struct pci_dev *pdev,
		const struct pci_device_id *ent);

static int do_eeprom_cmd(long ioaddr, int cmd, int cmd_len);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int speedo_open(struct net_device *dev);
static void speedo_resume(struct net_device *dev);
static void speedo_timer(unsigned long data);
static void speedo_init_rx_ring(struct net_device *dev);
static void speedo_tx_timeout(struct net_device *dev);
static int speedo_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void speedo_refill_rx_buffers(struct net_device *dev, int force);
static int speedo_rx(struct net_device *dev);
static void speedo_tx_buffer_gc(struct net_device *dev);
static irqreturn_t speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static int speedo_close(struct net_device *dev);
static struct net_device_stats *speedo_get_stats(struct net_device *dev);
static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void set_rx_mode(struct net_device *dev);
static void speedo_show_state(struct net_device *dev);
#ifdef honor_default_port
/* Optional driver feature to allow forcing the transceiver setting.
   Not recommended. */
static int mii_ctrl[8] = { 0x3300, 0x3100, 0x0000, 0x0100,
			   0x2000, 0x2100, 0x0400, 0x3100};
#endif
/* How to wait for the command unit to accept a command.
   Typically this takes 0 ticks. */
static inline unsigned char wait_for_cmd_done(struct net_device *dev)
{
	int wait = 1000;
	long cmd_ioaddr = dev->base_addr + SCBCmd;
	unsigned char r;

	do  {
		udelay(1);
		r = inb(cmd_ioaddr);
	} while(r && --wait >= 0);

	if (wait < 0)
		printk(KERN_ALERT "%s: wait_for_cmd_done timeout!\n", dev->name);
	return r;
}
static int __devinit eepro100_init_one (struct pci_dev *pdev,
		const struct pci_device_id *ent)
{
	unsigned long ioaddr;
	int irq;
	int acpi_idle_state = 0, pm;
	static int cards_found /* = 0 */;

#ifndef MODULE
	/* when built-in, we only print version if device is found */
	static int did_version;
	if (did_version++ == 0)
		printk(version);
#endif

	/* save power state before pci_enable_device overwrites it */
	pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm) {
		u16 pwr_command;
		pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pwr_command);
		acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
	}

	if (pci_enable_device(pdev))
		goto err_out_none;

	pci_set_master(pdev);

	if (!request_region(pci_resource_start(pdev, 1),
			pci_resource_len(pdev, 1), "eepro100")) {
		printk (KERN_ERR "eepro100: cannot reserve I/O ports\n");
		goto err_out_none;
	}
	if (!request_mem_region(pci_resource_start(pdev, 0),
			pci_resource_len(pdev, 0), "eepro100")) {
		printk (KERN_ERR "eepro100: cannot reserve MMIO region\n");
		goto err_out_free_pio_region;
	}

	irq = pdev->irq;
#ifdef USE_IO
	ioaddr = pci_resource_start(pdev, 1);
	if (DEBUG & NETIF_MSG_PROBE)
		printk("Found Intel i82557 PCI Speedo at I/O %#lx, IRQ %d.\n",
			   ioaddr, irq);
#else
	ioaddr = (unsigned long)ioremap(pci_resource_start(pdev, 0),
					pci_resource_len(pdev, 0));
	if (!ioaddr) {
		printk (KERN_ERR "eepro100: cannot remap MMIO region %lx @ %lx\n",
				pci_resource_len(pdev, 0), pci_resource_start(pdev, 0));
		goto err_out_free_mmio_region;
	}
	if (DEBUG & NETIF_MSG_PROBE)
		printk("Found Intel i82557 PCI Speedo, MMIO at %#lx, IRQ %d.\n",
			   pci_resource_start(pdev, 0), irq);
#endif

	if (speedo_found1(pdev, ioaddr, cards_found, acpi_idle_state) == 0)
		cards_found++;
	else
		goto err_out_iounmap;

	return 0;

err_out_iounmap: ;
#ifndef USE_IO
	iounmap ((void *)ioaddr);
#endif
err_out_free_mmio_region:
	release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
err_out_free_pio_region:
	release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
err_out_none:
	return -ENODEV;
}
static int __devinit speedo_found1(struct pci_dev *pdev,
		long ioaddr, int card_idx, int acpi_idle_state)
{
	struct net_device *dev;
	struct speedo_private *sp;
	const char *product;
	int i, option;
	u16 eeprom[0x100];
	int size;
	void *tx_ring_space;
	dma_addr_t tx_ring_dma;

	size = TX_RING_SIZE * sizeof(struct TxFD) + sizeof(struct speedo_stats);
	tx_ring_space = pci_alloc_consistent(pdev, size, &tx_ring_dma);
	if (tx_ring_space == NULL)
		return -1;

	dev = init_etherdev(NULL, sizeof(struct speedo_private));
	if (dev == NULL) {
		printk(KERN_ERR "eepro100: Could not allocate ethernet device.\n");
		pci_free_consistent(pdev, size, tx_ring_space, tx_ring_dma);
		return -1;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (dev->mem_start > 0)
		option = dev->mem_start;
	else if (card_idx >= 0  &&  options[card_idx] >= 0)
		option = options[card_idx];
	else
		option = 0;

	/* Read the station address EEPROM before doing the reset.
	   Nominally this should even be done before accepting the device, but
	   then we wouldn't have a device name with which to report the error.
	   The size test is for 6 bit vs. 8 bit address serial EEPROMs.
	*/
	{
		unsigned long iobase;
		int read_cmd, ee_size;
		u16 sum;
		int j;

		/* Use IO only to avoid postponed writes and satisfy EEPROM timing
		   requirements. */
		iobase = pci_resource_start(pdev, 1);
		if ((do_eeprom_cmd(iobase, EE_READ_CMD << 24, 27) & 0xffe0000)
			== 0xffe0000) {
			ee_size = 0x100;
			read_cmd = EE_READ_CMD << 24;
		} else {
			ee_size = 0x40;
			read_cmd = EE_READ_CMD << 22;
		}

		for (j = 0, i = 0, sum = 0; i < ee_size; i++) {
			u16 value = do_eeprom_cmd(iobase, read_cmd | (i << 16), 27);
			eeprom[i] = value;
			sum += value;
			if (i < 3) {
				dev->dev_addr[j++] = value;
				dev->dev_addr[j++] = value >> 8;
			}
		}
		if (sum != 0xBABA)
			printk(KERN_WARNING "%s: Invalid EEPROM checksum %#4.4x, "
				   "check settings before activating this device!\n",
				   dev->name, sum);
		/* Don't  unregister_netdev(dev);  as the EEPro may actually be
		   usable, especially if the MAC address is set later.
		   On the other hand, it may be unusable if MDI data is corrupted. */
	}

	/* Reset the chip: stop Tx and Rx processes and clear counters.
	   This takes less than 10usec and will easily finish before the next
	   action. */
	outl(PortReset, ioaddr + SCBPort);
	inl(ioaddr + SCBPort);
	udelay(10);

	if (eeprom[3] & 0x0100)
		product = "OEM i82557/i82558 10/100 Ethernet";
	else
		product = pdev->dev.name;

	printk(KERN_INFO "%s: %s, ", dev->name, product);

	for (i = 0; i < 5; i++)
		printk("%2.2X:", dev->dev_addr[i]);
	printk("%2.2X, ", dev->dev_addr[i]);
#ifdef USE_IO
	printk("I/O at %#3lx, ", ioaddr);
#endif
	printk("IRQ %d.\n", pdev->irq);
	/* we must initialize base_addr early, for mdio_{read,write} */
	dev->base_addr = ioaddr;

#if 1  || defined(kernel_bloat)
	/* OK, this is pure kernel bloat.  I don't like it when other drivers
	   waste non-pageable kernel space to emit similar messages, but I need
	   them for bug reports. */
	{
		const char *connectors[] = {" RJ45", " BNC", " AUI", " MII"};
		/* The self-test results must be paragraph aligned. */
		volatile s32 *self_test_results;
		int boguscnt = 16000;	/* Timeout for self-test. */
		if ((eeprom[3] & 0x03) != 0x03)
			printk(KERN_INFO "  Receiver lock-up bug exists -- enabling"
				   " work-around.\n");
		printk(KERN_INFO "  Board assembly %4.4x%2.2x-%3.3d, Physical"
			   " connectors present:",
			   eeprom[8], eeprom[9]>>8, eeprom[9] & 0xff);
		for (i = 0; i < 4; i++)
			if (eeprom[5] & (1<<i))
				printk(connectors[i]);
		printk("\n"KERN_INFO"  Primary interface chip %s PHY #%d.\n",
			   phys[(eeprom[6]>>8)&15], eeprom[6] & 0x1f);
		if (eeprom[7] & 0x0700)
			printk(KERN_INFO "    Secondary interface chip %s.\n",
				   phys[(eeprom[7]>>8)&7]);
		if (((eeprom[6]>>8) & 0x3f) == DP83840
			||  ((eeprom[6]>>8) & 0x3f) == DP83840A) {
			int mdi_reg23 = mdio_read(dev, eeprom[6] & 0x1f, 23) | 0x0422;
			if (congenb)
				mdi_reg23 |= 0x0100;
			printk(KERN_INFO"  DP83840 specific setup, setting register 23 to %4.4x.\n",
				   mdi_reg23);
			mdio_write(dev, eeprom[6] & 0x1f, 23, mdi_reg23);
		}
		if ((option >= 0) && (option & 0x70)) {
			printk(KERN_INFO "  Forcing %dMbs %s-duplex operation.\n",
				   (option & 0x20 ? 100 : 10),
				   (option & 0x10 ? "full" : "half"));
			mdio_write(dev, eeprom[6] & 0x1f, MII_BMCR,
					   ((option & 0x20) ? 0x2000 : 0) |	/* 100mbps? */
					   ((option & 0x10) ? 0x0100 : 0));	/* Full duplex? */
		}

		/* Perform a system self-test. */
		self_test_results = (s32*) ((((long) tx_ring_space) + 15) & ~0xf);
		self_test_results[0] = 0;
		self_test_results[1] = -1;
		outl(tx_ring_dma | PortSelfTest, ioaddr + SCBPort);
		do {
			udelay(10);
		} while (self_test_results[1] == -1  &&  --boguscnt >= 0);

		if (boguscnt < 0) {		/* Test optimized out. */
			printk(KERN_ERR "Self test failed, status %8.8x:\n"
				   KERN_ERR " Failure to initialize the i82557.\n"
				   KERN_ERR " Verify that the card is a bus-master"
				   " capable slot.\n",
				   self_test_results[1]);
		} else
			printk(KERN_INFO "  General self-test: %s.\n"
				   KERN_INFO "  Serial sub-system self-test: %s.\n"
				   KERN_INFO "  Internal registers self-test: %s.\n"
				   KERN_INFO "  ROM checksum self-test: %s (%#8.8x).\n",
				   self_test_results[1] & 0x1000 ? "failed" : "passed",
				   self_test_results[1] & 0x0020 ? "failed" : "passed",
				   self_test_results[1] & 0x0008 ? "failed" : "passed",
				   self_test_results[1] & 0x0004 ? "failed" : "passed",
				   self_test_results[0]);
	}
#endif  /* kernel_bloat */
	outl(PortReset, ioaddr + SCBPort);
	inl(ioaddr + SCBPort);
	udelay(10);

	/* Return the chip to its original power state. */
	pci_set_power_state(pdev, acpi_idle_state);

	pci_set_drvdata (pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	dev->irq = pdev->irq;

	sp = dev->priv;
	sp->pdev = pdev;
	sp->msg_enable = DEBUG;
	sp->acpi_pwr = acpi_idle_state;
	sp->tx_ring = tx_ring_space;
	sp->tx_ring_dma = tx_ring_dma;
	sp->lstats = (struct speedo_stats *)(sp->tx_ring + TX_RING_SIZE);
	sp->lstats_dma = TX_RING_ELEM_DMA(sp, TX_RING_SIZE);
	init_timer(&sp->timer); /* used in ioctl() */
	spin_lock_init(&sp->lock);

	sp->mii_if.full_duplex = option >= 0 && (option & 0x10) ? 1 : 0;
	if (card_idx >= 0) {
		if (full_duplex[card_idx] >= 0)
			sp->mii_if.full_duplex = full_duplex[card_idx];
	}
	sp->default_port = option >= 0 ? (option & 0x0f) : 0;

	sp->phy[0] = eeprom[6];
	sp->phy[1] = eeprom[7];

	sp->mii_if.phy_id = eeprom[6] & 0x1f;
	sp->mii_if.phy_id_mask = 0x1f;
	sp->mii_if.reg_num_mask = 0x1f;
	sp->mii_if.dev = dev;
	sp->mii_if.mdio_read = mdio_read;
	sp->mii_if.mdio_write = mdio_write;

	sp->rx_bug = (eeprom[3] & 0x03) == 3 ? 0 : 1;
	if (((pdev->device > 0x1030 && (pdev->device < 0x103F)))
		|| (pdev->device == 0x2449) || (pdev->device == 0x2459)
		|| (pdev->device == 0x245D)) {
		sp->chip_id = 1;
	}

	if (sp->rx_bug)
		printk(KERN_INFO "  Receiver lock-up workaround activated.\n");

	/* The Speedo-specific entries in the device structure. */
	dev->open = &speedo_open;
	dev->hard_start_xmit = &speedo_start_xmit;
	netif_set_tx_timeout(dev, &speedo_tx_timeout, TX_TIMEOUT);
	dev->stop = &speedo_close;
	dev->get_stats = &speedo_get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &speedo_ioctl;

	return 0;
}
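/* Issue an SCB command that the chip may be slow to accept: poll for a
   clear command byte before issuing it, then poll again (up to ~20 ms)
   until it is taken.  Used from speedo_resume() for RxAddrLoad and
   CUCmdBase, which are noted there as operations that "can take a while". */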
static void do_slow_command(struct net_device *dev, int cmd)
{
	long cmd_ioaddr = dev->base_addr + SCBCmd;
	int wait = 0;
	do
		if (inb(cmd_ioaddr) == 0) break;
	while(++wait <= 200);
	if (wait > 100)
		printk(KERN_ERR "Command %4.4x never accepted (%d polls)!\n",
			   inb(cmd_ioaddr), wait);

	outb(cmd, cmd_ioaddr);

	for (wait = 0; wait <= 100; wait++)
		if (inb(cmd_ioaddr) == 0) return;
	for (; wait <= 20000; wait++)
		if (inb(cmd_ioaddr) == 0) return;
		else udelay(1);
	printk(KERN_ERR "Command %4.4x was not accepted after %d polls!"
		   "  Current status %8.8x.\n",
		   cmd, wait, inl(dev->base_addr + SCBStatus));
}
/* Serial EEPROM section.
   A "bit" grungy, but we work our way through bit-by-bit :->. */
/*  EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK	0x01	/* EEPROM shift clock. */
#define EE_CS		0x02	/* EEPROM chip select. */
#define EE_DATA_WRITE	0x04	/* EEPROM chip data in. */
#define EE_DATA_READ	0x08	/* EEPROM chip data out. */
#define EE_ENB		(0x4800 | EE_CS)
#define EE_WRITE_0	0x4802
#define EE_WRITE_1	0x4806
#define EE_OFFSET	SCBeeprom
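/* do_eeprom_cmd() below bit-bangs these lines: with EE_CS asserted it
   clocks the opcode+address out MSB-first (EE_WRITE_0 and EE_WRITE_1
   differ only in the EE_DATA_WRITE bit) and samples EE_DATA_READ after
   every clock, so read data accumulates in the low bits of the return
   value.  The caller in speedo_found1() first issues a 27-clock read to
   probe whether the part uses 6-bit (64-word) or 8-bit (256-word)
   addressing. */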
/* The fixes for the code were kindly provided by Dragan Stancevic
   <visitor@valinux.com> to strictly follow Intel specifications of EEPROM
   access timing.
   The publicly available sheet 64486302 (sec. 3.1) specifies 1us access
   interval for serial EEPROM.  However, it looks like there is an
   additional requirement dictating larger udelay's in the code below.
   2000/05/24  SAW */
static int __devinit do_eeprom_cmd(long ioaddr, int cmd, int cmd_len)
{
	unsigned retval = 0;
	long ee_addr = ioaddr + SCBeeprom;

	io_outw(EE_ENB, ee_addr); udelay(2);
	io_outw(EE_ENB | EE_SHIFT_CLK, ee_addr); udelay(2);

	/* Shift the command bits out. */
	do {
		short dataval = (cmd & (1 << cmd_len)) ? EE_WRITE_1 : EE_WRITE_0;
		io_outw(dataval, ee_addr); udelay(2);
		io_outw(dataval | EE_SHIFT_CLK, ee_addr); udelay(2);
		retval = (retval << 1) | ((io_inw(ee_addr) & EE_DATA_READ) ? 1 : 0);
	} while (--cmd_len >= 0);
	io_outw(EE_ENB, ee_addr); udelay(2);

	/* Terminate the EEPROM access. */
	io_outw(EE_ENB & ~EE_CS, ee_addr);
	return retval;
}
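/* MII management (MDI) accesses go through the 32-bit SCBCtrlMDI register:
   opcode in bits 26-27 (10 = read, hence 0x08000000; 01 = write, hence
   0x04000000), PHY address in bits 21-25, register number in bits 16-20,
   data in the low 16 bits.  Bit 28 (0x10000000) is set by the chip when
   the cycle completes, which both routines below poll for. */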
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	long ioaddr = dev->base_addr;
	int val, boguscnt = 64*10;	/* <64 usec. to complete, typ 27 ticks */
	outl(0x08000000 | (location<<16) | (phy_id<<21), ioaddr + SCBCtrlMDI);
	do {
		val = inl(ioaddr + SCBCtrlMDI);
		if (--boguscnt < 0) {
			printk(KERN_ERR " mdio_read() timed out with val = %8.8x.\n", val);
			break;
		}
	} while (! (val & 0x10000000));
	return val & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	long ioaddr = dev->base_addr;
	int val, boguscnt = 64*10;	/* <64 usec. to complete, typ 27 ticks */
	outl(0x04000000 | (location<<16) | (phy_id<<21) | value,
		 ioaddr + SCBCtrlMDI);
	do {
		val = inl(ioaddr + SCBCtrlMDI);
		if (--boguscnt < 0) {
			printk(KERN_ERR" mdio_write() timed out with val = %8.8x.\n", val);
			break;
		}
	} while (! (val & 0x10000000));
}
static int
speedo_open(struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int retval;

	if (netif_msg_ifup(sp))
		printk(KERN_DEBUG "%s: speedo_open() irq %d.\n", dev->name, dev->irq);

	pci_set_power_state(sp->pdev, 0);

	/* Set up the Tx queue early.. */
	sp->cur_tx = 0;
	sp->dirty_tx = 0;
	sp->last_cmd = 0;
	sp->tx_full = 0;
	sp->in_interrupt = 0;

	/* .. we can safely take handler calls during init. */
	retval = request_irq(dev->irq, &speedo_interrupt, SA_SHIRQ, dev->name, dev);
	if (retval) {
		return retval;
	}

	dev->if_port = sp->default_port;

#ifdef oh_no_you_dont_unless_you_honour_the_options_passed_in_to_us
	/* Retrigger negotiation to reset previous errors. */
	if ((sp->phy[0] & 0x8000) == 0) {
		int phy_addr = sp->phy[0] & 0x1f ;
		/* Use 0x3300 for restarting NWay, other values to force xcvr:
		   0x0000 10-HD
		   0x0100 10-FD
		   0x2000 100-HD
		   0x2100 100-FD
		*/
#ifdef honor_default_port
		mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
#else
		mdio_write(dev, phy_addr, MII_BMCR, 0x3300);
#endif
	}
#endif

	speedo_init_rx_ring(dev);

	/* Fire up the hardware. */
	outw(SCBMaskAll, ioaddr + SCBCmd);
	speedo_resume(dev);

	netdevice_start(dev);
	netif_start_queue(dev);

	/* Setup the chip and configure the multicast list. */
	sp->mc_setup_head = NULL;
	sp->mc_setup_tail = NULL;
	sp->flow_ctrl = sp->partner = 0;
	sp->rx_mode = -1;	/* Invalid -> always reset the mode. */
	set_rx_mode(dev);
	if ((sp->phy[0] & 0x8000) == 0)
		sp->mii_if.advertising = mdio_read(dev, sp->phy[0] & 0x1f, MII_ADVERTISE);

	mii_check_link(&sp->mii_if);

	if (netif_msg_ifup(sp)) {
		printk(KERN_DEBUG "%s: Done speedo_open(), status %8.8x.\n",
			   dev->name, inw(ioaddr + SCBStatus));
	}

	/* Set the timer.  The timer serves a dual purpose:
	   1) to monitor the media interface (e.g. link beat) and perhaps switch
	   to an alternate media type
	   2) to monitor Rx activity, and restart the Rx process if the receiver
	   hangs. */
	sp->timer.expires = RUN_AT((24*HZ)/10);	/* 2.4 sec. */
	sp->timer.data = (unsigned long)dev;
	sp->timer.function = &speedo_timer;	/* timer handler */
	add_timer(&sp->timer);

	/* No need to wait for the command unit to accept here. */
	if ((sp->phy[0] & 0x8000) == 0)
		mdio_read(dev, sp->phy[0] & 0x1f, MII_BMCR);

	return 0;
}
/* Start the chip hardware after a full reset. */
static void speedo_resume(struct net_device *dev)
{
	struct speedo_private *sp = dev->priv;
	long ioaddr = dev->base_addr;

	/* Start with a Tx threshold of 256 (0x..20.... 8 byte units). */
	sp->tx_threshold = 0x01208000;

	/* Set the segment registers to '0'. */
	if (wait_for_cmd_done(dev) != 0) {
		outl(PortPartialReset, ioaddr + SCBPort);
		udelay(10);
	}

	outl(0, ioaddr + SCBPointer);
	inl(ioaddr + SCBPointer);	/* Flush to PCI. */
	udelay(10);			/* Bogus, but it avoids the bug. */

	/* Note: these next two operations can take a while. */
	do_slow_command(dev, RxAddrLoad);
	do_slow_command(dev, CUCmdBase);

	/* Load the statistics block and rx ring addresses. */
	outl(sp->lstats_dma, ioaddr + SCBPointer);
	inl(ioaddr + SCBPointer);	/* Flush to PCI */

	outb(CUStatsAddr, ioaddr + SCBCmd);
	sp->lstats->done_marker = 0;
	wait_for_cmd_done(dev);

	if (sp->rx_ringp[sp->cur_rx % RX_RING_SIZE] == NULL) {
		if (netif_msg_rx_err(sp))
			printk(KERN_DEBUG "%s: NULL cur_rx in speedo_resume().\n",
					dev->name);
	} else {
		outl(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
			 ioaddr + SCBPointer);
		inl(ioaddr + SCBPointer);	/* Flush to PCI */
	}

	/* Note: RxStart should complete instantly. */
	do_slow_command(dev, RxStart);
	do_slow_command(dev, CUDumpStats);

	/* Fill the first command with our physical address. */
	{
		struct descriptor *ias_cmd;

		ias_cmd =
			(struct descriptor *)&sp->tx_ring[sp->cur_tx++ % TX_RING_SIZE];
		/* Avoid a bug(?!) here by marking the command already completed. */
		ias_cmd->cmd_status = cpu_to_le32((CmdSuspend | CmdIASetup) | 0xa000);
		ias_cmd->link =
			cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
		memcpy(ias_cmd->params, dev->dev_addr, 6);
		if (sp->last_cmd)
			clear_suspend(sp->last_cmd);
		sp->last_cmd = ias_cmd;
	}

	/* Start the chip's Tx process and unmask interrupts. */
	outl(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
		 ioaddr + SCBPointer);
	/* We are not ACK-ing FCP and ER in the interrupt handler yet so they should
	   remain masked --Dragan */
	outw(CUStart | SCBMaskEarlyRx | SCBMaskFlowCtl, ioaddr + SCBCmd);
}
/*
 * Sometimes the receiver stops making progress.  This routine knows how to
 * get it going again, without losing packets or being otherwise nasty like
 * a chip reset would be.  Previously the driver had a whole sequence
 * of if RxSuspended, if it's no buffers do one thing, if it's no resources,
 * do another, etc.  But those things don't really matter.  Separate logic
 * in the ISR provides for allocating buffers--the other half of operation
 * is just making sure the receiver is active.  speedo_rx_soft_reset does that.
 * The problem with the old, more involved algorithm showed up under
 * ping floods on the order of 60K packets/second on a 100Mbps fdx network.
 */
static void
speedo_rx_soft_reset(struct net_device *dev)
{
	struct speedo_private *sp = dev->priv;
	struct RxFD *rfd;
	long ioaddr;

	ioaddr = dev->base_addr;
	if (wait_for_cmd_done(dev) != 0) {
		printk("%s: previous command stalled\n", dev->name);
		return;
	}
	/*
	 * Put the hardware into a known state.
	 */
	outb(RxAbort, ioaddr + SCBCmd);

	rfd = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];

	rfd->rx_buf_addr = 0xffffffff;

	if (wait_for_cmd_done(dev) != 0) {
		printk("%s: RxAbort command stalled\n", dev->name);
		return;
	}
	outl(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
		ioaddr + SCBPointer);
	outb(RxStart, ioaddr + SCBCmd);
}
/* Media monitoring and control. */
static void speedo_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int phy_num = sp->phy[0] & 0x1f;

	/* We have MII and lost link beat. */
	if ((sp->phy[0] & 0x8000) == 0) {
		int partner = mdio_read(dev, phy_num, MII_LPA);
		if (partner != sp->partner) {
			int flow_ctrl = sp->mii_if.advertising & partner & 0x0400 ? 1 : 0;
			if (netif_msg_link(sp)) {
				printk(KERN_DEBUG "%s: Link status change.\n", dev->name);
				printk(KERN_DEBUG "%s: Old partner %x, new %x, adv %x.\n",
					   dev->name, sp->partner, partner, sp->mii_if.advertising);
			}
			sp->partner = partner;
			if (flow_ctrl != sp->flow_ctrl) {
				sp->flow_ctrl = flow_ctrl;
				sp->rx_mode = -1;	/* Trigger a reload. */
			}
		}
	}
	mii_check_link(&sp->mii_if);
	if (netif_msg_timer(sp)) {
		printk(KERN_DEBUG "%s: Media control tick, status %4.4x.\n",
			   dev->name, inw(ioaddr + SCBStatus));
	}
	if (sp->rx_mode < 0  ||
		(sp->rx_bug  &&  jiffies - sp->last_rx_time > 2*HZ)) {
		/* We haven't received a packet in a Long Time.  We might have been
		   bitten by the receiver hang bug.  This can be cleared by sending
		   a set multicast list command. */
		if (netif_msg_timer(sp))
			printk(KERN_DEBUG "%s: Sending a multicast list set command"
				   " from a timer routine,"
				   " m=%d, j=%ld, l=%ld.\n",
				   dev->name, sp->rx_mode, jiffies, sp->last_rx_time);
		set_rx_mode(dev);
	}
	/* We must continue to monitor the media. */
	sp->timer.expires = RUN_AT(2*HZ);	/* 2.0 sec. */
	add_timer(&sp->timer);
}
static void speedo_show_state(struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	int i;

	if (netif_msg_pktdata(sp)) {
		printk(KERN_DEBUG "%s: Tx ring dump,  Tx queue %u / %u:\n",
		    dev->name, sp->cur_tx, sp->dirty_tx);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_DEBUG "%s:  %c%c%2d %8.8x.\n", dev->name,
			    i == sp->dirty_tx % TX_RING_SIZE ? '*' : ' ',
			    i == sp->cur_tx % TX_RING_SIZE ? '=' : ' ',
			    i, sp->tx_ring[i].status);

		printk(KERN_DEBUG "%s: Printing Rx ring"
		    " (next to receive into %u, dirty index %u).\n",
		    dev->name, sp->cur_rx, sp->dirty_rx);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(KERN_DEBUG "%s:  %c%c%c%2d %8.8x.\n", dev->name,
			    sp->rx_ringp[i] == sp->last_rxf ? 'l' : ' ',
			    i == sp->dirty_rx % RX_RING_SIZE ? '*' : ' ',
			    i == sp->cur_rx % RX_RING_SIZE ? '=' : ' ',
			    i, (sp->rx_ringp[i] != NULL) ?
			    (unsigned)sp->rx_ringp[i]->status : 0);
	}

#if 0
	{
		long ioaddr = dev->base_addr;
		int phy_num = sp->phy[0] & 0x1f;
		for (i = 0; i < 16; i++) {
			/* FIXME: what does it mean?  --SAW */
			if (i == 6) i = 21;
			printk(KERN_DEBUG "%s:  PHY index %d register %d is %4.4x.\n",
				   dev->name, phy_num, i, mdio_read(dev, phy_num, i));
		}
	}
#endif
}
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void
speedo_init_rx_ring(struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	struct RxFD *rxf, *last_rxf = NULL;
	dma_addr_t last_rxf_dma = 0 /* to shut up the compiler */;
	int i;

	sp->cur_rx = 0;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
		sp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;			/* OK.  Just initially short of Rx bufs. */
		/* Only align a successfully allocated skb (this used to run
		   before the NULL check, as flagged by hch). */
		rx_align(skb);			/* Align IP on 16 byte boundary */
		skb->dev = dev;			/* Mark as being used by this device. */
		rxf = (struct RxFD *)skb->tail;
		sp->rx_ringp[i] = rxf;
		sp->rx_ring_dma[i] =
			pci_map_single(sp->pdev, rxf,
					PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_BIDIRECTIONAL);
		skb_reserve(skb, sizeof(struct RxFD));
		if (last_rxf) {
			last_rxf->link = cpu_to_le32(sp->rx_ring_dma[i]);
			pci_dma_sync_single(sp->pdev, last_rxf_dma,
					sizeof(struct RxFD), PCI_DMA_TODEVICE);
		}
		last_rxf = rxf;
		last_rxf_dma = sp->rx_ring_dma[i];
		rxf->status = cpu_to_le32(0x00000001);	/* '1' is flag value only. */
		rxf->link = 0;			/* None yet. */
		/* This field unused by i82557. */
		rxf->rx_buf_addr = 0xffffffff;
		rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
		pci_dma_sync_single(sp->pdev, sp->rx_ring_dma[i],
				sizeof(struct RxFD), PCI_DMA_TODEVICE);
	}
	sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
	/* Mark the last entry as end-of-list. */
	last_rxf->status = cpu_to_le32(0xC0000002);	/* '2' is flag value only. */
	pci_dma_sync_single(sp->pdev, sp->rx_ring_dma[RX_RING_SIZE-1],
			sizeof(struct RxFD), PCI_DMA_TODEVICE);
	sp->last_rxf = last_rxf;
	sp->last_rxf_dma = last_rxf_dma;
}
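/* The Rx ring is thus a singly linked chain of RxFDs; the 0xC0000000 in
   the last entry's status sets its end-of-list and suspend bits so the
   receiver stops there instead of running past the allocated buffers. */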
static void speedo_purge_tx(struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	int entry;

	while ((int)(sp->cur_tx - sp->dirty_tx) > 0) {
		entry = sp->dirty_tx % TX_RING_SIZE;
		if (sp->tx_skbuff[entry]) {
			sp->stats.tx_errors++;
			pci_unmap_single(sp->pdev,
					le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
					sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(sp->tx_skbuff[entry]);
			sp->tx_skbuff[entry] = 0;
		}
		sp->dirty_tx++;
	}
	while (sp->mc_setup_head != NULL) {
		struct speedo_mc_block *t;
		if (netif_msg_tx_err(sp))
			printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
		pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
				sp->mc_setup_head->len, PCI_DMA_TODEVICE);
		t = sp->mc_setup_head->next;
		kfree(sp->mc_setup_head);
		sp->mc_setup_head = t;
	}
	sp->mc_setup_tail = NULL;
	sp->tx_full = 0;
	netif_wake_queue(dev);
}
static void reset_mii(struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;

	/* Reset the MII transceiver, suggested by Fred Young @ scalable.com. */
	if ((sp->phy[0] & 0x8000) == 0) {
		int phy_addr = sp->phy[0] & 0x1f;
		int advertising = mdio_read(dev, phy_addr, MII_ADVERTISE);
		int mii_bmcr = mdio_read(dev, phy_addr, MII_BMCR);
		mdio_write(dev, phy_addr, MII_BMCR, 0x0400);
		mdio_write(dev, phy_addr, MII_BMSR, 0x0000);
		mdio_write(dev, phy_addr, MII_ADVERTISE, 0x0000);
		mdio_write(dev, phy_addr, MII_BMCR, 0x8000);
#ifdef honor_default_port
		mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
#else
		mdio_read(dev, phy_addr, MII_BMCR);
		mdio_write(dev, phy_addr, MII_BMCR, mii_bmcr);
		mdio_write(dev, phy_addr, MII_ADVERTISE, advertising);
#endif
	}
}
static void speedo_tx_timeout(struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int status = inw(ioaddr + SCBStatus);
	unsigned long flags;

	if (netif_msg_tx_err(sp)) {
		printk(KERN_WARNING "%s: Transmit timed out: status %4.4x "
		   " %4.4x  at %d/%d command %8.8x.\n",
		   dev->name, status, inw(ioaddr + SCBCmd),
		   sp->dirty_tx, sp->cur_tx,
		   sp->tx_ring[sp->dirty_tx % TX_RING_SIZE].status);
	}
	speedo_show_state(dev);
#if 0
	if ((status & 0x00C0) != 0x0080
		&&  (status & 0x003C) == 0x0010) {
		/* Only the command unit has stopped. */
		printk(KERN_WARNING "%s: Trying to restart the transmitter...\n",
			   dev->name);
		outl(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
			 ioaddr + SCBPointer);
		outw(CUStart, ioaddr + SCBCmd);
		reset_mii(dev);
	} else {
#else
	{
#endif
		del_timer_sync(&sp->timer);
		/* Reset the Tx and Rx units. */
		outl(PortReset, ioaddr + SCBPort);
		/* We may get spurious interrupts here.  But I don't think that they
		   may do much harm.  1999/12/09 SAW */
		udelay(10);
		/* Disable interrupts. */
		outw(SCBMaskAll, ioaddr + SCBCmd);
		synchronize_irq(dev->irq);
		speedo_tx_buffer_gc(dev);
		/* Free as much as possible.
		   It helps to recover from a hang because of out-of-memory.
		   It also simplifies speedo_resume() in case TX ring is full or
		   close-to-be full. */
		speedo_purge_tx(dev);
		speedo_refill_rx_buffers(dev, 1);
		spin_lock_irqsave(&sp->lock, flags);
		speedo_resume(dev);
		sp->rx_mode = -1;
		dev->trans_start = jiffies;
		spin_unlock_irqrestore(&sp->lock, flags);
		set_rx_mode(dev); /* it takes the spinlock itself --SAW */
		/* Reset MII transceiver.  Do it before starting the timer to serialize
		   mdio_xxx operations.  Yes, it's a paranoia :-)  2000/05/09 SAW */
		reset_mii(dev);
		sp->timer.expires = RUN_AT(2*HZ);
		add_timer(&sp->timer);
	}
	return;
}
static int
speedo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int entry;

	/* Prevent interrupts from changing the Tx ring from underneath us. */
	unsigned long flags;

	spin_lock_irqsave(&sp->lock, flags);

	/* Check if there is enough space. */
	if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
		printk(KERN_ERR "%s: incorrect tbusy state, fixed.\n", dev->name);
		netif_stop_queue(dev);
		sp->tx_full = 1;
		spin_unlock_irqrestore(&sp->lock, flags);
		return 1;
	}

	/* Calculate the Tx descriptor entry. */
	entry = sp->cur_tx++ % TX_RING_SIZE;

	sp->tx_skbuff[entry] = skb;
	sp->tx_ring[entry].status =
		cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
	if (!(entry & ((TX_RING_SIZE>>2)-1)))
		sp->tx_ring[entry].status |= cpu_to_le32(CmdIntr);
	sp->tx_ring[entry].link =
		cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
	sp->tx_ring[entry].tx_desc_addr =
		cpu_to_le32(TX_RING_ELEM_DMA(sp, entry) + TX_DESCR_BUF_OFFSET);
	/* The data region is always in one buffer descriptor. */
	sp->tx_ring[entry].count = cpu_to_le32(sp->tx_threshold);
	sp->tx_ring[entry].tx_buf_addr0 =
		cpu_to_le32(pci_map_single(sp->pdev, skb->data,
					   skb->len, PCI_DMA_TODEVICE));
	sp->tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb->len);

	/* workaround for hardware bug on 10 mbit half duplex */

	if ((sp->partner == 0) && (sp->chip_id == 1)) {
		wait_for_cmd_done(dev);
		outb(0 , ioaddr + SCBCmd);
		udelay(1);
	}

	/* Trigger the command unit resume. */
	wait_for_cmd_done(dev);
	clear_suspend(sp->last_cmd);
	/* We want the time window between clearing suspend flag on the previous
	   command and resuming CU to be as small as possible.
	   Interrupts in between are very undesired.  --SAW */
	outb(CUResume, ioaddr + SCBCmd);
	sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];

	/* Leave room for set_rx_mode(). If there is no more space than reserved
	   for multicast filter mark the ring as full. */
	if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
		netif_stop_queue(dev);
		sp->tx_full = 1;
	}

	spin_unlock_irqrestore(&sp->lock, flags);

	dev->trans_start = jiffies;

	return 0;
}
static void speedo_tx_buffer_gc(struct net_device *dev)
{
	unsigned int dirty_tx;
	struct speedo_private *sp = (struct speedo_private *)dev->priv;

	dirty_tx = sp->dirty_tx;
	while ((int)(sp->cur_tx - dirty_tx) > 0) {
		int entry = dirty_tx % TX_RING_SIZE;
		int status = le32_to_cpu(sp->tx_ring[entry].status);

		if (netif_msg_tx_done(sp))
			printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n",
				   entry, status);
		if ((status & StatusComplete) == 0)
			break;			/* It still hasn't been processed. */
		if (status & TxUnderrun)
			if (sp->tx_threshold < 0x01e08000) {
				if (netif_msg_tx_err(sp))
					printk(KERN_DEBUG "%s: TX underrun, threshold adjusted.\n",
						   dev->name);
				sp->tx_threshold += 0x00040000;
			}
		/* Free the original skb. */
		if (sp->tx_skbuff[entry]) {
			sp->stats.tx_packets++;	/* Count only user packets. */
			sp->stats.tx_bytes += sp->tx_skbuff[entry]->len;
			pci_unmap_single(sp->pdev,
					le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
					sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(sp->tx_skbuff[entry]);
			sp->tx_skbuff[entry] = 0;
		}
		dirty_tx++;
	}

	if (netif_msg_tx_err(sp) && (int)(sp->cur_tx - dirty_tx) > TX_RING_SIZE) {
		printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d,"
			   " full=%d.\n",
			   dirty_tx, sp->cur_tx, sp->tx_full);
		dirty_tx += TX_RING_SIZE;
	}

	while (sp->mc_setup_head != NULL
		   && (int)(dirty_tx - sp->mc_setup_head->tx - 1) > 0) {
		struct speedo_mc_block *t;
		if (netif_msg_tx_err(sp))
			printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
		pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
				sp->mc_setup_head->len, PCI_DMA_TODEVICE);
		t = sp->mc_setup_head->next;
		kfree(sp->mc_setup_head);
		sp->mc_setup_head = t;
	}
	if (sp->mc_setup_head == NULL)
		sp->mc_setup_tail = NULL;

	sp->dirty_tx = dirty_tx;
}
1563 /* The interrupt handler does all of the Rx thread work and cleans up
1564 after the Tx thread. */
1565 static irqreturn_t speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1567 struct net_device *dev = (struct net_device *)dev_instance;
1568 struct speedo_private *sp;
1569 long ioaddr, boguscnt = max_interrupt_work;
1570 unsigned short status;
1571 unsigned int handled = 0;
1573 ioaddr = dev->base_addr;
1574 sp = (struct speedo_private *)dev->priv;
1576 #ifndef final_version
1577 /* A lock to prevent simultaneous entry on SMP machines. */
1578 if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
1579 printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
1580 dev->name);
1581 sp->in_interrupt = 0; /* Avoid halting machine. */
1582 return IRQ_NONE;
1584 #endif
1586 do {
1587 status = inw(ioaddr + SCBStatus);
1588 /* Acknowledge all of the current interrupt sources ASAP. */
1589 /* Will change from 0xfc00 to 0xff00 when we start handling
1590 FCP and ER interrupts --Dragan */
1591 outw(status & 0xfc00, ioaddr + SCBStatus);
1593 if (netif_msg_intr(sp))
1594 printk(KERN_DEBUG "%s: interrupt status=%#4.4x.\n",
1595 dev->name, status);
1597 if ((status & 0xfc00) == 0)
1598 break;
1599 handled = 1;
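		/* A decoding aid, assuming the usual i8255x SCB status-word
		   layout: 0x5000 covers FR (frame received) and RNR (receiver
		   not ready), tested just below; 0xA400 covers CX (command
		   complete), CNA (CU not active) and SWI (software interrupt),
		   tested before the Tx garbage collection further down. */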
1602 if ((status & 0x5000) || /* Packet received, or Rx error. */
1603 (sp->rx_ring_state&(RrNoMem|RrPostponed)) == RrPostponed)
1604 /* Need to gather the postponed packet. */
1605 speedo_rx(dev);
1607 /* Always check if all rx buffers are allocated. --SAW */
1608 speedo_refill_rx_buffers(dev, 0);
1610 spin_lock(&sp->lock);
		/*
		 * The chip may have suspended reception for various reasons.
		 * Check for that, and re-prime it should this be the case.
		 */
1615 switch ((status >> 2) & 0xf) {
1616 case 0: /* Idle */
1617 break;
1618 case 1: /* Suspended */
1619 case 2: /* No resources (RxFDs) */
1620 case 9: /* Suspended with no more RBDs */
1621 case 10: /* No resources due to no RBDs */
1622 case 12: /* Ready with no RBDs */
1623 speedo_rx_soft_reset(dev);
1624 break;
1625 case 3: case 5: case 6: case 7: case 8:
1626 case 11: case 13: case 14: case 15:
1627 /* these are all reserved values */
1628 break;
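		/* Sanity note: (status >> 2) & 0xf above extracts what should
		   be the SCB's four-bit RU status field; e.g. a hypothetical
		   status of 0x4008 decodes to 2, "no resources", which
		   triggers the soft reset. */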
1632 /* User interrupt, Command/Tx unit interrupt or CU not active. */
1633 if (status & 0xA400) {
1634 speedo_tx_buffer_gc(dev);
1635 if (sp->tx_full
1636 && (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL) {
1637 /* The ring is no longer full. */
1638 sp->tx_full = 0;
1639 netif_wake_queue(dev); /* Attention: under a spinlock. --SAW */
1643 spin_unlock(&sp->lock);
1645 if (--boguscnt < 0) {
1646 printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
1647 dev->name, status);
1648 /* Clear all interrupt sources. */
1649 /* Will change from 0xfc00 to 0xff00 when we start handling
1650 FCP and ER interrupts --Dragan */
1651 outw(0xfc00, ioaddr + SCBStatus);
1652 break;
1654 } while (1);
1656 if (netif_msg_intr(sp))
1657 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1658 dev->name, inw(ioaddr + SCBStatus));
1660 clear_bit(0, (void*)&sp->in_interrupt);
1661 return IRQ_RETVAL(handled);
1664 static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry)
1666 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1667 struct RxFD *rxf;
1668 struct sk_buff *skb;
1669 /* Get a fresh skbuff to replace the consumed one. */
1670 skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
	sp->rx_skbuff[entry] = skb;
	if (skb == NULL) {
		sp->rx_ringp[entry] = NULL;
		return NULL;
	}
	/* Align only after the NULL check (resolves the old XXX from hch);
	   rx_align() may call skb_reserve() and must not see a NULL skb. */
	rx_align(skb);	/* Align the IP header on a 16 byte boundary. */
1678 rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
1679 sp->rx_ring_dma[entry] =
1680 pci_map_single(sp->pdev, rxf,
1681 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1682 skb->dev = dev;
1683 skb_reserve(skb, sizeof(struct RxFD));
1684 rxf->rx_buf_addr = 0xffffffff;
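	/* All-ones presumably selects the chip's simplified memory mode: no
	   separate receive buffer descriptor, the data simply follows this
	   RxFD. The value reads the same in either byte order, so no
	   cpu_to_le32() conversion is needed. */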
1685 pci_dma_sync_single(sp->pdev, sp->rx_ring_dma[entry],
1686 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1687 return rxf;
1690 static inline void speedo_rx_link(struct net_device *dev, int entry,
1691 struct RxFD *rxf, dma_addr_t rxf_dma)
1693 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1694 rxf->status = cpu_to_le32(0xC0000001); /* '1' for driver use only. */
1695 rxf->link = 0; /* None yet. */
1696 rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
1697 sp->last_rxf->link = cpu_to_le32(rxf_dma);
1698 sp->last_rxf->status &= cpu_to_le32(~0xC0000000);
1699 pci_dma_sync_single(sp->pdev, sp->last_rxf_dma,
1700 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1701 sp->last_rxf = rxf;
1702 sp->last_rxf_dma = rxf_dma;
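	/* What the dance above achieves, assuming the usual RxFD command-word
	   layout (bit 31 = EL "end of list", bit 30 = S "suspend"): the new
	   descriptor is queued with both bits set so the RU will stop at it,
	   then the same bits are cleared on the previous tail so the RU can
	   run into the new descriptor; the low '1' is a driver-only marker. */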
1705 static int speedo_refill_rx_buf(struct net_device *dev, int force)
1707 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1708 int entry;
1709 struct RxFD *rxf;
1711 entry = sp->dirty_rx % RX_RING_SIZE;
1712 if (sp->rx_skbuff[entry] == NULL) {
1713 rxf = speedo_rx_alloc(dev, entry);
1714 if (rxf == NULL) {
1715 unsigned int forw;
1716 int forw_entry;
1717 if (netif_msg_rx_err(sp) || !(sp->rx_ring_state & RrOOMReported)) {
1718 printk(KERN_WARNING "%s: can't fill rx buffer (force %d)!\n",
1719 dev->name, force);
1720 sp->rx_ring_state |= RrOOMReported;
1722 speedo_show_state(dev);
1723 if (!force)
1724 return -1; /* Better luck next time! */
1725 /* Borrow an skb from one of next entries. */
1726 for (forw = sp->dirty_rx + 1; forw != sp->cur_rx; forw++)
1727 if (sp->rx_skbuff[forw % RX_RING_SIZE] != NULL)
1728 break;
1729 if (forw == sp->cur_rx)
1730 return -1;
1731 forw_entry = forw % RX_RING_SIZE;
1732 sp->rx_skbuff[entry] = sp->rx_skbuff[forw_entry];
1733 sp->rx_skbuff[forw_entry] = NULL;
1734 rxf = sp->rx_ringp[forw_entry];
1735 sp->rx_ringp[forw_entry] = NULL;
1736 sp->rx_ringp[entry] = rxf;
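			/* After the borrow, the hole simply moves forward to
			   forw_entry; the ring stays consistent because the
			   skbuff and RxFD pointers travel together. */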
1738 } else {
1739 rxf = sp->rx_ringp[entry];
1741 speedo_rx_link(dev, entry, rxf, sp->rx_ring_dma[entry]);
1742 sp->dirty_rx++;
1743 sp->rx_ring_state &= ~(RrNoMem|RrOOMReported); /* Mark the progress. */
1744 return 0;
1747 static void speedo_refill_rx_buffers(struct net_device *dev, int force)
1749 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1751 /* Refill the RX ring. */
1752 while ((int)(sp->cur_rx - sp->dirty_rx) > 0 &&
1753 speedo_refill_rx_buf(dev, force) != -1);
1756 static int
1757 speedo_rx(struct net_device *dev)
1759 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1760 int entry = sp->cur_rx % RX_RING_SIZE;
1761 int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;
1762 int alloc_ok = 1;
1763 int npkts = 0;
1765 if (netif_msg_intr(sp))
1766 printk(KERN_DEBUG " In speedo_rx().\n");
1767 /* If we own the next entry, it's a new packet. Send it up. */
1768 while (sp->rx_ringp[entry] != NULL) {
1769 int status;
1770 int pkt_len;
1772 pci_dma_sync_single(sp->pdev, sp->rx_ring_dma[entry],
1773 sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1774 status = le32_to_cpu(sp->rx_ringp[entry]->status);
1775 pkt_len = le32_to_cpu(sp->rx_ringp[entry]->count) & 0x3fff;
1777 if (!(status & RxComplete))
1778 break;
1780 if (--rx_work_limit < 0)
1781 break;
1783 /* Check for a rare out-of-memory case: the current buffer is
1784 the last buffer allocated in the RX ring. --SAW */
1785 if (sp->last_rxf == sp->rx_ringp[entry]) {
1786 /* Postpone the packet. It'll be reaped at an interrupt when this
1787 packet is no longer the last packet in the ring. */
1788 if (netif_msg_rx_err(sp))
1789 printk(KERN_DEBUG "%s: RX packet postponed!\n",
1790 dev->name);
1791 sp->rx_ring_state |= RrPostponed;
1792 break;
1795 if (netif_msg_rx_status(sp))
1796 printk(KERN_DEBUG " speedo_rx() status %8.8x len %d.\n", status,
1797 pkt_len);
1798 if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) {
1799 if (status & RxErrTooBig)
1800 printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, "
1801 "status %8.8x!\n", dev->name, status);
		else if (!(status & RxOK)) {
1803 /* There was a fatal error. This *should* be impossible. */
1804 sp->stats.rx_errors++;
1805 printk(KERN_ERR "%s: Anomalous event in speedo_rx(), "
1806 "status %8.8x.\n",
1807 dev->name, status);
1809 } else {
1810 struct sk_buff *skb;
1812 /* Check if the packet is long enough to just accept without
1813 copying to a properly sized skbuff. */
1814 if (pkt_len < rx_copybreak
1815 && (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
1816 skb->dev = dev;
1817 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1818 /* 'skb_put()' points to the start of sk_buff data area. */
1819 pci_dma_sync_single(sp->pdev, sp->rx_ring_dma[entry],
1820 sizeof(struct RxFD) + pkt_len, PCI_DMA_FROMDEVICE);
1822 #if 1 || USE_IP_CSUM
1823 /* Packet is in one chunk -- we can copy + cksum. */
1824 eth_copy_and_sum(skb, sp->rx_skbuff[entry]->tail, pkt_len, 0);
1825 skb_put(skb, pkt_len);
1826 #else
1827 memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->tail,
1828 pkt_len);
1829 #endif
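			/* The copybreak path trades one memcpy for keeping
			   the original full-size buffer in the ring, so small
			   frames never consume a fresh PKT_BUF_SZ allocation. */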
1830 npkts++;
1831 } else {
1832 /* Pass up the already-filled skbuff. */
1833 skb = sp->rx_skbuff[entry];
1834 if (skb == NULL) {
1835 printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
1836 dev->name);
1837 break;
1839 sp->rx_skbuff[entry] = NULL;
1840 skb_put(skb, pkt_len);
1841 npkts++;
1842 sp->rx_ringp[entry] = NULL;
1843 pci_unmap_single(sp->pdev, sp->rx_ring_dma[entry],
1844 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1846 skb->protocol = eth_type_trans(skb, dev);
1847 netif_rx(skb);
1848 dev->last_rx = jiffies;
1849 sp->stats.rx_packets++;
1850 sp->stats.rx_bytes += pkt_len;
1852 entry = (++sp->cur_rx) % RX_RING_SIZE;
1853 sp->rx_ring_state &= ~RrPostponed;
1854 /* Refill the recently taken buffers.
1855 Do it one-by-one to handle traffic bursts better. */
1856 if (alloc_ok && speedo_refill_rx_buf(dev, 0) == -1)
1857 alloc_ok = 0;
1860 /* Try hard to refill the recently taken buffers. */
1861 speedo_refill_rx_buffers(dev, 1);
1863 if (npkts)
1864 sp->last_rx_time = jiffies;
1866 return 0;
1869 static int
1870 speedo_close(struct net_device *dev)
1872 long ioaddr = dev->base_addr;
1873 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1874 int i;
1876 netdevice_stop(dev);
1877 netif_stop_queue(dev);
1879 if (netif_msg_ifdown(sp))
1880 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
1881 dev->name, inw(ioaddr + SCBStatus));
1883 /* Shut off the media monitoring timer. */
1884 del_timer_sync(&sp->timer);
1886 outw(SCBMaskAll, ioaddr + SCBCmd);
1888 /* Shutting down the chip nicely fails to disable flow control. So.. */
1889 outl(PortPartialReset, ioaddr + SCBPort);
1890 inl(ioaddr + SCBPort); /* flush posted write */
	/*
	 * The chip requires a 10 microsecond quiet period. Wait here!
	 */
1894 udelay(10);
1896 free_irq(dev->irq, dev);
1897 speedo_show_state(dev);
1899 /* Free all the skbuffs in the Rx and Tx queues. */
1900 for (i = 0; i < RX_RING_SIZE; i++) {
1901 struct sk_buff *skb = sp->rx_skbuff[i];
1902 sp->rx_skbuff[i] = 0;
1903 /* Clear the Rx descriptors. */
1904 if (skb) {
1905 pci_unmap_single(sp->pdev,
1906 sp->rx_ring_dma[i],
1907 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1908 dev_kfree_skb(skb);
1912 for (i = 0; i < TX_RING_SIZE; i++) {
1913 struct sk_buff *skb = sp->tx_skbuff[i];
1914 sp->tx_skbuff[i] = 0;
1915 /* Clear the Tx descriptors. */
1916 if (skb) {
1917 pci_unmap_single(sp->pdev,
1918 le32_to_cpu(sp->tx_ring[i].tx_buf_addr0),
1919 skb->len, PCI_DMA_TODEVICE);
1920 dev_kfree_skb(skb);
1924 /* Free multicast setting blocks. */
1925 for (i = 0; sp->mc_setup_head != NULL; i++) {
1926 struct speedo_mc_block *t;
1927 t = sp->mc_setup_head->next;
1928 kfree(sp->mc_setup_head);
1929 sp->mc_setup_head = t;
1931 sp->mc_setup_tail = NULL;
1932 if (netif_msg_ifdown(sp))
1933 printk(KERN_DEBUG "%s: %d multicast blocks dropped.\n", dev->name, i);
1935 pci_set_power_state(sp->pdev, 2);
1937 return 0;
1940 /* The Speedo-3 has an especially awkward and unusable method of getting
1941 statistics out of the chip. It takes an unpredictable length of time
1942 for the dump-stats command to complete. To avoid a busy-wait loop we
1943 update the stats with the previous dump results, and then trigger a
1944 new dump.
   Oh, and incoming frames are dropped while executing dump-stats!
*/
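/* A sketch of the resulting handshake, as this driver uses it: the chip
   writes the completion signature 0xA007 into lstats->done_marker when a
   dump finishes; speedo_get_stats() folds the counters into sp->stats,
   zeroes the marker, and queues the next CUDumpStats, so the figures
   returned may lag the hardware by one dump interval. */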
1948 static struct net_device_stats *
1949 speedo_get_stats(struct net_device *dev)
1951 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1952 long ioaddr = dev->base_addr;
1954 /* Update only if the previous dump finished. */
1955 if (sp->lstats->done_marker == le32_to_cpu(0xA007)) {
1956 sp->stats.tx_aborted_errors += le32_to_cpu(sp->lstats->tx_coll16_errs);
1957 sp->stats.tx_window_errors += le32_to_cpu(sp->lstats->tx_late_colls);
1958 sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_underruns);
1959 sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_lost_carrier);
1960 /*sp->stats.tx_deferred += le32_to_cpu(sp->lstats->tx_deferred);*/
1961 sp->stats.collisions += le32_to_cpu(sp->lstats->tx_total_colls);
1962 sp->stats.rx_crc_errors += le32_to_cpu(sp->lstats->rx_crc_errs);
1963 sp->stats.rx_frame_errors += le32_to_cpu(sp->lstats->rx_align_errs);
1964 sp->stats.rx_over_errors += le32_to_cpu(sp->lstats->rx_resource_errs);
1965 sp->stats.rx_fifo_errors += le32_to_cpu(sp->lstats->rx_overrun_errs);
1966 sp->stats.rx_length_errors += le32_to_cpu(sp->lstats->rx_runt_errs);
1967 sp->lstats->done_marker = 0x0000;
1968 if (netif_running(dev)) {
1969 unsigned long flags;
1970 /* Take a spinlock to make wait_for_cmd_done and sending the
1971 command atomic. --SAW */
1972 spin_lock_irqsave(&sp->lock, flags);
1973 wait_for_cmd_done(dev);
1974 outb(CUDumpStats, ioaddr + SCBCmd);
1975 spin_unlock_irqrestore(&sp->lock, flags);
1978 return &sp->stats;
1981 static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
1983 u32 ethcmd;
1984 struct speedo_private *sp = dev->priv;
1986 if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
1987 return -EFAULT;
1989 switch (ethcmd) {
1990 /* get driver-specific version/etc. info */
1991 case ETHTOOL_GDRVINFO: {
1992 struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
1993 strncpy(info.driver, "eepro100", sizeof(info.driver)-1);
1994 strncpy(info.version, version, sizeof(info.version)-1);
1995 if (sp && sp->pdev)
1996 strcpy(info.bus_info, sp->pdev->slot_name);
1997 if (copy_to_user(useraddr, &info, sizeof(info)))
1998 return -EFAULT;
1999 return 0;
2002 /* get settings */
2003 case ETHTOOL_GSET: {
2004 struct ethtool_cmd ecmd = { ETHTOOL_GSET };
2005 spin_lock_irq(&sp->lock);
2006 mii_ethtool_gset(&sp->mii_if, &ecmd);
2007 spin_unlock_irq(&sp->lock);
2008 if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
2009 return -EFAULT;
2010 return 0;
2012 /* set settings */
2013 case ETHTOOL_SSET: {
2014 int r;
2015 struct ethtool_cmd ecmd;
2016 if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
2017 return -EFAULT;
2018 spin_lock_irq(&sp->lock);
2019 r = mii_ethtool_sset(&sp->mii_if, &ecmd);
2020 spin_unlock_irq(&sp->lock);
2021 return r;
2023 /* restart autonegotiation */
2024 case ETHTOOL_NWAY_RST: {
2025 return mii_nway_restart(&sp->mii_if);
2027 /* get link status */
2028 case ETHTOOL_GLINK: {
2029 struct ethtool_value edata = {ETHTOOL_GLINK};
2030 edata.data = mii_link_ok(&sp->mii_if);
2031 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2032 return -EFAULT;
2033 return 0;
2035 /* get message-level */
2036 case ETHTOOL_GMSGLVL: {
2037 struct ethtool_value edata = {ETHTOOL_GMSGLVL};
2038 edata.data = sp->msg_enable;
2039 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2040 return -EFAULT;
2041 return 0;
2043 /* set message-level */
2044 case ETHTOOL_SMSGLVL: {
2045 struct ethtool_value edata;
2046 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2047 return -EFAULT;
2048 sp->msg_enable = edata.data;
2049 return 0;
2054 return -EOPNOTSUPP;
2057 static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2059 struct speedo_private *sp = (struct speedo_private *)dev->priv;
2060 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&rq->ifr_data;
2061 int phy = sp->phy[0] & 0x1f;
2062 int saved_acpi;
2063 int t;
2065 switch(cmd) {
2066 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
2067 data->phy_id = phy;
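		/* Deliberately falls through to SIOCGMIIREG so that val_out
		   is filled in by the register read below. */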
2069 case SIOCGMIIREG: /* Read MII PHY register. */
2070 /* FIXME: these operations need to be serialized with MDIO
2071 access from the timeout handler.
2072 They are currently serialized only with MDIO access from the
2073 timer routine. 2000/05/09 SAW */
2074 saved_acpi = pci_set_power_state(sp->pdev, 0);
2075 t = del_timer_sync(&sp->timer);
2076 data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
2077 if (t)
2078 add_timer(&sp->timer); /* may be set to the past --SAW */
2079 pci_set_power_state(sp->pdev, saved_acpi);
2080 return 0;
2082 case SIOCSMIIREG: /* Write MII PHY register. */
2083 if (!capable(CAP_NET_ADMIN))
2084 return -EPERM;
2085 saved_acpi = pci_set_power_state(sp->pdev, 0);
2086 t = del_timer_sync(&sp->timer);
2087 mdio_write(dev, data->phy_id, data->reg_num, data->val_in);
2088 if (t)
2089 add_timer(&sp->timer); /* may be set to the past --SAW */
2090 pci_set_power_state(sp->pdev, saved_acpi);
2091 return 0;
2092 case SIOCETHTOOL:
2093 return netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
2094 default:
2095 return -EOPNOTSUPP;
2099 /* Set or clear the multicast filter for this adaptor.
2100 This is very ugly with Intel chips -- we usually have to execute an
2101 entire configuration command, plus process a multicast command.
2102 This is complicated. We must put a large configuration command and
2103 an arbitrarily-sized multicast command in the transmit list.
2104 To minimize the disruption -- the previous command might have already
2105 loaded the link -- we convert the current command block, normally a Tx
   command, into a no-op and link it to the new command.
*/
2108 static void set_rx_mode(struct net_device *dev)
2110 struct speedo_private *sp = (struct speedo_private *)dev->priv;
2111 long ioaddr = dev->base_addr;
2112 struct descriptor *last_cmd;
2113 char new_rx_mode;
2114 unsigned long flags;
2115 int entry, i;
2117 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
2118 new_rx_mode = 3;
2119 } else if ((dev->flags & IFF_ALLMULTI) ||
2120 dev->mc_count > multicast_filter_limit) {
2121 new_rx_mode = 1;
2122 } else
2123 new_rx_mode = 0;
2125 if (netif_msg_rx_status(sp))
2126 printk(KERN_DEBUG "%s: set_rx_mode %d -> %d\n", dev->name,
2127 sp->rx_mode, new_rx_mode);
2129 if ((int)(sp->cur_tx - sp->dirty_tx) > TX_RING_SIZE - TX_MULTICAST_SIZE) {
2130 /* The Tx ring is full -- don't add anything! Hope the mode will be
2131 * set again later. */
2132 sp->rx_mode = -1;
2133 return;
2136 if (new_rx_mode != sp->rx_mode) {
2137 u8 *config_cmd_data;
2139 spin_lock_irqsave(&sp->lock, flags);
2140 entry = sp->cur_tx++ % TX_RING_SIZE;
2141 last_cmd = sp->last_cmd;
2142 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
2144 sp->tx_skbuff[entry] = 0; /* Redundant. */
2145 sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdConfigure);
2146 sp->tx_ring[entry].link =
2147 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2148 config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr;
2149 /* Construct a full CmdConfig frame. */
2150 memcpy(config_cmd_data, i82558_config_cmd, CONFIG_DATA_SIZE);
2151 config_cmd_data[1] = (txfifo << 4) | rxfifo;
2152 config_cmd_data[4] = rxdmacount;
2153 config_cmd_data[5] = txdmacount + 0x80;
2154 config_cmd_data[15] |= (new_rx_mode & 2) ? 1 : 0;
		/* 0x80 doesn't disable FC; 0x84 does.
		   Disable flow control since we are not ACK-ing any FC
		   interrupts for now. --Dragan */
2158 config_cmd_data[19] = 0x84;
2159 config_cmd_data[19] |= sp->mii_if.full_duplex ? 0x40 : 0;
2160 config_cmd_data[21] = (new_rx_mode & 1) ? 0x0D : 0x05;
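		/* Summary of the bytes patched above, as this driver lays them
		   out: byte 1 packs the Tx/Rx FIFO thresholds, bytes 4-5 the
		   Rx/Tx DMA burst counts, byte 15 bit 0 enables promiscuous
		   mode, byte 19 carries flow control and full duplex, and byte
		   21 selects normal (0x05) or all-multicast (0x0D) reception. */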
2161 if (sp->phy[0] & 0x8000) { /* Use the AUI port instead. */
2162 config_cmd_data[15] |= 0x80;
2163 config_cmd_data[8] = 0;
2165 /* Trigger the command unit resume. */
2166 wait_for_cmd_done(dev);
2167 clear_suspend(last_cmd);
2168 outb(CUResume, ioaddr + SCBCmd);
2169 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2170 netif_stop_queue(dev);
2171 sp->tx_full = 1;
2173 spin_unlock_irqrestore(&sp->lock, flags);
2176 if (new_rx_mode == 0 && dev->mc_count < 4) {
2177 /* The simple case of 0-3 multicast list entries occurs often, and
2178 fits within one tx_ring[] entry. */
2179 struct dev_mc_list *mclist;
2180 u16 *setup_params, *eaddrs;
2182 spin_lock_irqsave(&sp->lock, flags);
2183 entry = sp->cur_tx++ % TX_RING_SIZE;
2184 last_cmd = sp->last_cmd;
2185 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
2187 sp->tx_skbuff[entry] = 0;
2188 sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdMulticastList);
2189 sp->tx_ring[entry].link =
2190 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2191 sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */
2192 setup_params = (u16 *)&sp->tx_ring[entry].tx_desc_addr;
2193 *setup_params++ = cpu_to_le16(dev->mc_count*6);
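		/* The first 16-bit word of a CmdMulticastList frame holds the
		   byte count of the address list (6 bytes per station
		   address); the addresses themselves follow, copied 16 bits
		   at a time below. */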
2194 /* Fill in the multicast addresses. */
2195 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
2196 i++, mclist = mclist->next) {
2197 eaddrs = (u16 *)mclist->dmi_addr;
2198 *setup_params++ = *eaddrs++;
2199 *setup_params++ = *eaddrs++;
2200 *setup_params++ = *eaddrs++;
2203 wait_for_cmd_done(dev);
2204 clear_suspend(last_cmd);
2205 /* Immediately trigger the command unit resume. */
2206 outb(CUResume, ioaddr + SCBCmd);
2208 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2209 netif_stop_queue(dev);
2210 sp->tx_full = 1;
2212 spin_unlock_irqrestore(&sp->lock, flags);
2213 } else if (new_rx_mode == 0) {
2214 struct dev_mc_list *mclist;
2215 u16 *setup_params, *eaddrs;
2216 struct speedo_mc_block *mc_blk;
2217 struct descriptor *mc_setup_frm;
2218 int i;
2220 mc_blk = kmalloc(sizeof(*mc_blk) + 2 + multicast_filter_limit*6,
2221 GFP_ATOMIC);
2222 if (mc_blk == NULL) {
2223 printk(KERN_ERR "%s: Failed to allocate a setup frame.\n",
2224 dev->name);
2225 sp->rx_mode = -1; /* We failed, try again. */
2226 return;
2228 mc_blk->next = NULL;
2229 mc_blk->len = 2 + multicast_filter_limit*6;
2230 mc_blk->frame_dma =
2231 pci_map_single(sp->pdev, &mc_blk->frame, mc_blk->len,
2232 PCI_DMA_TODEVICE);
2233 mc_setup_frm = &mc_blk->frame;
2235 /* Fill the setup frame. */
2236 if (netif_msg_ifup(sp))
2237 printk(KERN_DEBUG "%s: Constructing a setup frame at %p.\n",
2238 dev->name, mc_setup_frm);
2239 mc_setup_frm->cmd_status =
2240 cpu_to_le32(CmdSuspend | CmdIntr | CmdMulticastList);
2241 /* Link set below. */
2242 setup_params = (u16 *)&mc_setup_frm->params;
2243 *setup_params++ = cpu_to_le16(dev->mc_count*6);
2244 /* Fill in the multicast addresses. */
2245 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
2246 i++, mclist = mclist->next) {
2247 eaddrs = (u16 *)mclist->dmi_addr;
2248 *setup_params++ = *eaddrs++;
2249 *setup_params++ = *eaddrs++;
2250 *setup_params++ = *eaddrs++;
2253 /* Disable interrupts while playing with the Tx Cmd list. */
2254 spin_lock_irqsave(&sp->lock, flags);
2256 if (sp->mc_setup_tail)
2257 sp->mc_setup_tail->next = mc_blk;
2258 else
2259 sp->mc_setup_head = mc_blk;
2260 sp->mc_setup_tail = mc_blk;
2261 mc_blk->tx = sp->cur_tx;
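		/* Stamp the block with the Tx sequence number at which it is
		   queued; speedo_tx_buffer_gc() compares this against dirty_tx
		   to decide when the setup frame has been consumed and the
		   block can be freed. */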
2263 entry = sp->cur_tx++ % TX_RING_SIZE;
2264 last_cmd = sp->last_cmd;
2265 sp->last_cmd = mc_setup_frm;
2267 /* Change the command to a NoOp, pointing to the CmdMulti command. */
2268 sp->tx_skbuff[entry] = 0;
2269 sp->tx_ring[entry].status = cpu_to_le32(CmdNOp);
2270 sp->tx_ring[entry].link = cpu_to_le32(mc_blk->frame_dma);
2272 /* Set the link in the setup frame. */
2273 mc_setup_frm->link =
2274 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2276 pci_dma_sync_single(sp->pdev, mc_blk->frame_dma,
2277 mc_blk->len, PCI_DMA_TODEVICE);
2279 wait_for_cmd_done(dev);
2280 clear_suspend(last_cmd);
2281 /* Immediately trigger the command unit resume. */
2282 outb(CUResume, ioaddr + SCBCmd);
2284 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2285 netif_stop_queue(dev);
2286 sp->tx_full = 1;
2288 spin_unlock_irqrestore(&sp->lock, flags);
2290 if (netif_msg_rx_status(sp))
2291 printk(" CmdMCSetup frame length %d in entry %d.\n",
2292 dev->mc_count, entry);
2295 sp->rx_mode = new_rx_mode;
2298 #ifdef CONFIG_PM
2299 static int eepro100_suspend(struct pci_dev *pdev, u32 state)
2301 struct net_device *dev = pci_get_drvdata (pdev);
2302 struct speedo_private *sp = (struct speedo_private *)dev->priv;
2303 long ioaddr = dev->base_addr;
2305 pci_save_state(pdev, sp->pm_state);
2307 if (!netif_running(dev))
2308 return 0;
2310 del_timer_sync(&sp->timer);
2312 netif_device_detach(dev);
2313 outl(PortPartialReset, ioaddr + SCBPort);
2315 /* XXX call pci_set_power_state ()? */
2316 return 0;
2319 static int eepro100_resume(struct pci_dev *pdev)
2321 struct net_device *dev = pci_get_drvdata (pdev);
2322 struct speedo_private *sp = (struct speedo_private *)dev->priv;
2323 long ioaddr = dev->base_addr;
2325 pci_restore_state(pdev, sp->pm_state);
2327 if (!netif_running(dev))
2328 return 0;
	/* I'm absolutely uncertain whether this part of the code works.
2331 The problems are:
2332 - correct hardware reinitialization;
2333 - correct driver behavior between different steps of the
2334 reinitialization;
2335 - serialization with other driver calls.
2336 2000/03/08 SAW */
2337 outw(SCBMaskAll, ioaddr + SCBCmd);
2338 speedo_resume(dev);
2339 netif_device_attach(dev);
2340 sp->rx_mode = -1;
2341 sp->flow_ctrl = sp->partner = 0;
2342 set_rx_mode(dev);
2343 sp->timer.expires = RUN_AT(2*HZ);
2344 add_timer(&sp->timer);
2345 return 0;
2347 #endif /* CONFIG_PM */
2349 static void __devexit eepro100_remove_one (struct pci_dev *pdev)
2351 struct net_device *dev = pci_get_drvdata (pdev);
2352 struct speedo_private *sp = (struct speedo_private *)dev->priv;
2354 unregister_netdev(dev);
2356 release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
2357 release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
2359 #ifndef USE_IO
2360 iounmap((char *)dev->base_addr);
2361 #endif
2363 pci_free_consistent(pdev, TX_RING_SIZE * sizeof(struct TxFD)
2364 + sizeof(struct speedo_stats),
2365 sp->tx_ring, sp->tx_ring_dma);
2366 pci_disable_device(pdev);
2367 kfree(dev);
2370 static struct pci_device_id eepro100_pci_tbl[] __devinitdata = {
2371 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82557,
2372 PCI_ANY_ID, PCI_ANY_ID, },
2373 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82559ER,
2374 PCI_ANY_ID, PCI_ANY_ID, },
2375 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_7,
2376 PCI_ANY_ID, PCI_ANY_ID, },
2377 { PCI_VENDOR_ID_INTEL, 0x1029, PCI_ANY_ID, PCI_ANY_ID, },
2378 { PCI_VENDOR_ID_INTEL, 0x1030, PCI_ANY_ID, PCI_ANY_ID, },
2379 { PCI_VENDOR_ID_INTEL, 0x1031, PCI_ANY_ID, PCI_ANY_ID, },
2380 { PCI_VENDOR_ID_INTEL, 0x1032, PCI_ANY_ID, PCI_ANY_ID, },
2381 { PCI_VENDOR_ID_INTEL, 0x1033, PCI_ANY_ID, PCI_ANY_ID, },
2382 { PCI_VENDOR_ID_INTEL, 0x1034, PCI_ANY_ID, PCI_ANY_ID, },
2383 { PCI_VENDOR_ID_INTEL, 0x1035, PCI_ANY_ID, PCI_ANY_ID, },
2384 { PCI_VENDOR_ID_INTEL, 0x1036, PCI_ANY_ID, PCI_ANY_ID, },
2385 { PCI_VENDOR_ID_INTEL, 0x1037, PCI_ANY_ID, PCI_ANY_ID, },
2386 { PCI_VENDOR_ID_INTEL, 0x1038, PCI_ANY_ID, PCI_ANY_ID, },
2387 { PCI_VENDOR_ID_INTEL, 0x1039, PCI_ANY_ID, PCI_ANY_ID, },
2388 { PCI_VENDOR_ID_INTEL, 0x103A, PCI_ANY_ID, PCI_ANY_ID, },
2389 { PCI_VENDOR_ID_INTEL, 0x103B, PCI_ANY_ID, PCI_ANY_ID, },
2390 { PCI_VENDOR_ID_INTEL, 0x103C, PCI_ANY_ID, PCI_ANY_ID, },
2391 { PCI_VENDOR_ID_INTEL, 0x103D, PCI_ANY_ID, PCI_ANY_ID, },
2392 { PCI_VENDOR_ID_INTEL, 0x103E, PCI_ANY_ID, PCI_ANY_ID, },
2393 { PCI_VENDOR_ID_INTEL, 0x1050, PCI_ANY_ID, PCI_ANY_ID, },
2394 { PCI_VENDOR_ID_INTEL, 0x1059, PCI_ANY_ID, PCI_ANY_ID, },
2395 { PCI_VENDOR_ID_INTEL, 0x1227, PCI_ANY_ID, PCI_ANY_ID, },
2396 { PCI_VENDOR_ID_INTEL, 0x1228, PCI_ANY_ID, PCI_ANY_ID, },
2397 { PCI_VENDOR_ID_INTEL, 0x2449, PCI_ANY_ID, PCI_ANY_ID, },
2398 { PCI_VENDOR_ID_INTEL, 0x2459, PCI_ANY_ID, PCI_ANY_ID, },
2399 { PCI_VENDOR_ID_INTEL, 0x245D, PCI_ANY_ID, PCI_ANY_ID, },
2400 { PCI_VENDOR_ID_INTEL, 0x5200, PCI_ANY_ID, PCI_ANY_ID, },
2401 { PCI_VENDOR_ID_INTEL, 0x5201, PCI_ANY_ID, PCI_ANY_ID, },
2402 { 0,}
2404 MODULE_DEVICE_TABLE(pci, eepro100_pci_tbl);
2406 static struct pci_driver eepro100_driver = {
2407 .name = "eepro100",
2408 .id_table = eepro100_pci_tbl,
2409 .probe = eepro100_init_one,
2410 .remove = __devexit_p(eepro100_remove_one),
2411 #ifdef CONFIG_PM
2412 .suspend = eepro100_suspend,
2413 .resume = eepro100_resume,
2414 #endif /* CONFIG_PM */
2417 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,48)
2418 static int pci_module_init(struct pci_driver *pdev)
2420 int rc;
2422 rc = pci_register_driver(pdev);
2423 if (rc <= 0) {
2424 printk(KERN_INFO "%s: No cards found, driver not installed.\n",
2425 pdev->name);
2426 pci_unregister_driver(pdev);
2427 return -ENODEV;
2429 return 0;
2431 #endif
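/* Note on the wrapper above: on kernels before 2.3.48 pci_register_driver()
   presumably returned the number of devices claimed rather than 0/-errno,
   hence the rc <= 0 test meaning "no cards found" and the immediate
   unregister to back the registration out. */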
2433 static int __init eepro100_init_module(void)
2435 #ifdef MODULE
2436 printk(version);
2437 #endif
2438 return pci_module_init(&eepro100_driver);
2441 static void __exit eepro100_cleanup_module(void)
2443 pci_unregister_driver(&eepro100_driver);
2446 module_init(eepro100_init_module);
2447 module_exit(eepro100_cleanup_module);
/*
 * Local variables:
 * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
 * c-indent-level: 4
 * c-basic-offset: 4
 * tab-width: 4
 * End:
 */