/* drivers/net/eepro100.c: An Intel i82557-559 Ethernet driver for Linux. */
/*
	Written 1996-1999 by Donald Becker.

	The driver also contains updates by different kernel developers
	(see incomplete list below).
	Current maintainer is Andrey V. Savochkin <saw@saw.sw.com.sg>.
	Please use this email address and linux-kernel mailing list for bug reports.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	This driver is for the Intel EtherExpress Pro100 (Speedo3) design.
	It should work with all i82557/558/559 boards.

	Version history:
	1998 Apr - 2000 Feb  Andrey V. Savochkin <saw@saw.sw.com.sg>
		Serious fixes for multicast filter list setting, TX timeout routine;
		RX ring refilling logic;  other stuff
	2000 Feb  Jeff Garzik <jgarzik@pobox.com>
		Convert to new PCI driver interface
	2000 Mar 24  Dragan Stancevic <visitor@valinux.com>
		Disabled FC and ER, to avoid lockups when we get FCP interrupts.
	2000 Jul 17  Goutham Rao <goutham.rao@intel.com>
		PCI DMA API fixes, adding pci_dma_sync_single calls where necessary
	2000 Aug 31  David Mosberger <davidm@hpl.hp.com>
		rx_align support: enables rx DMA without causing unaligned accesses.
*/
static const char *version =
"eepro100.c:v1.09j-t 9/29/99 Donald Becker http://www.scyld.com/network/eepro100.html\n"
"eepro100.c: $Revision: 1.36 $ 2000/11/17 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n";
/* A few user-configurable values that apply to all boards.
   First set is undocumented and spelled per Intel recommendations. */

static int congenb /* = 0 */;	/* Enable congestion control in the DP83840. */
static int txfifo = 8;		/* Tx FIFO threshold in 4 byte units, 0-15 */
static int rxfifo = 8;		/* Rx FIFO threshold, default 32 bytes. */
/* Tx/Rx DMA burst length, 0-127, 0 == no preemption, tx==128 -> disabled. */
static int txdmacount = 128;
static int rxdmacount /* = 0 */;
#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) || defined(__mips__) || \
	defined(__arm__)
/* align rx buffers to 2 bytes so that IP header is aligned */
# define rx_align(skb)		skb_reserve((skb), 2)
# define RxFD_ALIGNMENT		__attribute__ ((aligned (2), packed))
#else
# define rx_align(skb)
# define RxFD_ALIGNMENT
#endif
/* Set the copy breakpoint for the copy-only-tiny-buffer Rx method.
   Lower values use more memory, but are faster. */
static int rx_copybreak = 200;

/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast) */
static int multicast_filter_limit = 64;

/* 'options' is used to pass a transceiver override or full-duplex flag
   e.g. "options=16" for FD, "options=32" for 100mbps-only. */
static int full_duplex[] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1};
/* A few values that may be tweaked. */
/* The ring sizes should be a power of two for efficiency. */
#define TX_RING_SIZE	64
#define RX_RING_SIZE	64
/* How many slots multicast filter setup may take.
   Do not decrease without changing set_rx_mode() implementation. */
#define TX_MULTICAST_SIZE   2
#define TX_MULTICAST_RESERV (TX_MULTICAST_SIZE*2)
/* Actual number of TX packets queued, must be
   <= TX_RING_SIZE-TX_MULTICAST_RESERV. */
#define TX_QUEUE_LIMIT  (TX_RING_SIZE-TX_MULTICAST_RESERV)
/* Hysteresis marking queue as no longer full. */
#define TX_QUEUE_UNFULL (TX_QUEUE_LIMIT-4)
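/* Worked out with the sizes above: TX_MULTICAST_RESERV is 4, so the queue is
   stopped once 60 (TX_QUEUE_LIMIT) descriptors are outstanding and is only
   woken again after the ring drains below 56 (TX_QUEUE_UNFULL).  The 4-slot
   gap is the hysteresis that keeps the queue from bouncing between the
   stopped and running states on every completed packet. */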
/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT		(2*HZ)
/* Size of a pre-allocated Rx buffer: <Ethernet MTU> + slack.*/
#define PKT_BUF_SZ		1536

#if !defined(__OPTIMIZE__) || !defined(__KERNEL__)
#warning  You must compile this file with the correct options!
#warning  See the last lines of the source file.
#error You must compile this driver with "-O".
#endif
#include <linux/config.h>
#include <linux/version.h>
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/delay.h>

#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/uaccess.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
static int debug = -1;
#define DEBUG_DEFAULT		(NETIF_MSG_DRV		| \
				 NETIF_MSG_HW		| \
				 NETIF_MSG_RX_ERR	| \
				 NETIF_MSG_TX_ERR)
#define DEBUG			((debug >= 0) ? (1<<debug)-1 : DEBUG_DEFAULT)
MODULE_AUTHOR("Maintainer: Andrey V. Savochkin <saw@saw.sw.com.sg>");
MODULE_DESCRIPTION("Intel i82557/i82558/i82559 PCI EtherExpressPro driver");
MODULE_LICENSE("GPL");
MODULE_PARM(debug, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(congenb, "i");
MODULE_PARM(txfifo, "i");
MODULE_PARM(rxfifo, "i");
MODULE_PARM(txdmacount, "i");
MODULE_PARM(rxdmacount, "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(multicast_filter_limit, "i");
MODULE_PARM_DESC(debug, "debug level (0-6)");
MODULE_PARM_DESC(options, "Bits 0-3: transceiver type, bit 4: full duplex, bit 5: 100Mbps");
MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)");
MODULE_PARM_DESC(congenb, "Enable congestion control (1)");
MODULE_PARM_DESC(txfifo, "Tx FIFO threshold in 4 byte units, (0-15)");
MODULE_PARM_DESC(rxfifo, "Rx FIFO threshold in 4 byte units, (0-15)");
MODULE_PARM_DESC(txdmacount, "Tx DMA burst length; 128 - disable (0-128)");
MODULE_PARM_DESC(rxdmacount, "Rx DMA burst length; 128 - disable (0-128)");
MODULE_PARM_DESC(rx_copybreak, "copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(max_interrupt_work, "maximum events handled per interrupt");
MODULE_PARM_DESC(multicast_filter_limit, "maximum number of filtered multicast addresses");
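/* Illustrative only: with the parameters above, forcing the first NIC to
   100 Mbps full duplex with verbose logging could look like
	modprobe eepro100 options=0x30 debug=5
   where 0x30 sets bit 4 (full duplex) and bit 5 (100 Mbps), matching the
   "options=16"/"options=32" flags documented earlier in this file. */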
#define RUN_AT(x) (jiffies + (x))

/* ACPI power states don't universally work (yet) */
#ifndef CONFIG_PM
#undef pci_set_power_state
#define pci_set_power_state null_set_power_state
static inline int null_set_power_state(struct pci_dev *dev, int state)
{
	return 0;
}
#endif /* CONFIG_PM */
#define netdevice_start(dev)
#define netdevice_stop(dev)
#define netif_set_tx_timeout(dev, tf, tm) \
	do { \
		(dev)->tx_timeout = (tf); \
		(dev)->watchdog_timeo = (tm); \
	} while(0)
/*
				Theory of Operation

I. Board Compatibility

This device driver is designed for the Intel i82557 "Speedo3" chip, Intel's
single-chip fast Ethernet controller for PCI, as used on the Intel
EtherExpress Pro 100 adapter.

II. Board-specific settings

PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board.  The system BIOS should be set to assign the
PCI INTA signal to an otherwise unused system IRQ line.  While it's
possible to share PCI interrupt lines, it negatively impacts performance and
only recent kernels support it.

III. Driver operation

IIIA. General
The Speedo3 is very similar to other Intel network chips, that is to say
"apparently designed on a different planet".  This chip retains the complex
Rx and Tx descriptors and multiple buffer pointers of previous chips, but
also has simplified Tx and Rx buffer modes.  This driver uses the "flexible"
Tx mode, but in a simplified lower-overhead manner: it associates only a
single buffer descriptor with each frame descriptor.

Despite the extra space overhead in each receive skbuff, the driver must use
the simplified Rx buffer mode to assure that only a single data buffer is
associated with each RxFD.  The driver implements this by reserving space
for the Rx descriptor at the head of each Rx skbuff.

The Speedo-3 has receive and command unit base addresses that are added to
almost all descriptor pointers.  The driver sets these to zero, so that all
pointer fields are absolute addresses.

The System Control Block (SCB) of some previous Intel chips exists on the
chip in both PCI I/O and memory space.  This driver uses the I/O space
registers, but might switch to memory mapped mode to better support non-x86
processors.

IIIB. Transmit structure

The driver must use the complex Tx command+descriptor mode in order to
have an indirect pointer to the skbuff data section.  Each Tx command block
(TxCB) is associated with two immediately appended Tx Buffer Descriptors
(TxBDs).  A fixed ring of these TxCB+TxBD pairs is kept as part of the
speedo_private data structure for each adapter instance.

The newer i82558 explicitly supports this structure, and can read the two
TxBDs in the same PCI burst as the TxCB.

This ring structure is used for all normal transmit packets, but the
transmit packet descriptors aren't long enough for most non-Tx commands such
as CmdConfigure.  This is complicated by the possibility that the chip has
already loaded the link address in the previous descriptor.  So for these
commands we convert the next free descriptor on the ring to a NoOp, and point
that descriptor's link to the complex command.

An additional complexity of these non-transmit commands is that they may be
added asynchronously to the normal transmit queue, so we disable interrupts
whenever the Tx descriptor ring is manipulated.

A notable aspect of these special configure commands is that they do
work with the normal Tx ring entry scavenge method.  The Tx ring scavenge
is done at interrupt time using the 'dirty_tx' index, and checking for the
command-complete bit.  The setup frames may have the NoOp command on the
Tx ring marked as complete while the setup command itself has not yet
completed, but this is not a problem.  The tx_ring entry can still be
safely reused, as the tx_skbuff[] entry is always empty for config_cmd and
mc_setup frames.

Commands may have bits set e.g. CmdSuspend in the command word to either
suspend or stop the transmit/command unit.  This driver always flags the last
command with CmdSuspend, erases the CmdSuspend in the previous command, and
then issues a CU_RESUME.
Note: Watch out for the potential race condition here: imagine
	erasing the previous suspend
		the chip processes the previous command
		the chip processes the final command, and suspends
	doing the CU_RESUME
		the chip processes the next, not-yet-valid, post-final command.
So blindly sending a CU_RESUME is only safe if we do it immediately
after erasing the previous CmdSuspend, without the possibility of an
intervening delay.  Thus the resume command is always within the
interrupts-disabled region.  This is a timing dependence, but handling this
condition in a timing-independent way would considerably complicate the code.

Note: In previous generation Intel chips, restarting the command unit was a
notoriously slow process.  This is presumably no longer true.

IIIC. Receive structure

Because of the bus-master support on the Speedo3 this driver uses the new
SKBUFF_RX_COPYBREAK scheme, rather than a fixed intermediate receive buffer.
This scheme allocates full-sized skbuffs as receive buffers.  The value
SKBUFF_RX_COPYBREAK is used as the copying breakpoint: it is chosen to
trade off the memory wasted by passing the full-sized skbuff to the queue
layer for all frames vs. the copying cost of copying a frame to a
correctly-sized skbuff.

For small frames the copying cost is negligible (esp. considering that we
are pre-loading the cache with immediately useful header information), so we
allocate a new, minimally-sized skbuff.  For large frames the copying cost
is non-trivial, and the larger copy might flush the cache of useful data, so
we pass up the skbuff the packet was received into.

IV. Notes

Thanks to Steve Williams of Intel for arranging the non-disclosure agreement
that stated that I could disclose the information.  But I still resent
having to sign an Intel NDA when I'm helping Intel sell their own product!
*/
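/* A minimal sketch of the IIIB suspend/resume pattern above (illustrative
   only; 'entry', 'ioaddr' and 'flags' stand in for the locals used later in
   this file -- speedo_start_xmit() is the authoritative version):

	spin_lock_irqsave(&sp->lock, flags);
	sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
	...
	wait_for_cmd_done(dev);
	clear_suspend(sp->last_cmd);		// un-suspend the previous command
	outb(CUResume, ioaddr + SCBCmd);	// must follow with no delay
	sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
	spin_unlock_irqrestore(&sp->lock, flags);

   Keeping the clear_suspend()/CUResume pair inside one interrupts-off region
   is what closes the race described in the note above. */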
static int speedo_found1(struct pci_dev *pdev, long ioaddr, int fnd_cnt, int acpi_idle_state);

enum pci_flags_bit {
	PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
	PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
};
static inline unsigned int io_inw(unsigned long port)
{
	return inw(port);
}
static inline void io_outw(unsigned int val, unsigned long port)
{
	outw(val, port);
}
#ifndef USE_IO
/* Currently alpha headers define in/out macros.
   Undefine them.  2000/03/30  SAW */
#undef inb
#undef inw
#undef inl
#undef outb
#undef outw
#undef outl
#define inb readb
#define inw readw
#define inl readl
#define outb writeb
#define outw writew
#define outl writel
#endif
/* Offsets to the various registers.
   All accesses need not be longword aligned. */
enum speedo_offsets {
	SCBStatus = 0, SCBCmd = 2,	/* Rx/Command Unit command and status. */
	SCBIntmask = 3,
	SCBPointer = 4,			/* General purpose pointer. */
	SCBPort = 8,			/* Misc. commands and operands. */
	SCBflash = 12, SCBeeprom = 14,	/* EEPROM and flash memory control. */
	SCBCtrlMDI = 16,		/* MDI interface control. */
	SCBEarlyRx = 20,		/* Early receive byte count. */
};
/* Commands that can be put in a command list entry. */
enum commands {
	CmdNOp = 0, CmdIASetup = 0x10000, CmdConfigure = 0x20000,
	CmdMulticastList = 0x30000, CmdTx = 0x40000, CmdTDR = 0x50000,
	CmdDump = 0x60000, CmdDiagnose = 0x70000,
	CmdSuspend = 0x40000000,	/* Suspend after completion. */
	CmdIntr = 0x20000000,		/* Interrupt after completion. */
	CmdTxFlex = 0x00080000,		/* Use "Flexible mode" for CmdTx command. */
};
/* Clear CmdSuspend (1<<30) avoiding interference with the card access to the
   status bits.  Previous driver versions used separate 16 bit fields for
   commands and statuses.  --SAW
 */
#if defined(__alpha__)
# define clear_suspend(cmd)  clear_bit(30, &(cmd)->cmd_status);
#else
# if defined(__LITTLE_ENDIAN)
#  define clear_suspend(cmd)  ((__u16 *)&(cmd)->cmd_status)[1] &= ~0x4000
# elif defined(__BIG_ENDIAN)
#  define clear_suspend(cmd)  ((__u16 *)&(cmd)->cmd_status)[1] &= ~0x0040
# else
#  error Unsupported byteorder
# endif
#endif
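/* Why the two constants differ: the descriptor is stored little-endian, so
   CmdSuspend (bit 30) lives in byte 3, bit 6.  ((__u16 *)&cmd_status)[1]
   reads bytes 2-3 as a host u16, which places that bit at 0x4000 on a
   little-endian CPU but at 0x0040 on a big-endian one. */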
enum SCBCmdBits {
	SCBMaskCmdDone=0x8000, SCBMaskRxDone=0x4000, SCBMaskCmdIdle=0x2000,
	SCBMaskRxSuspend=0x1000, SCBMaskEarlyRx=0x0800, SCBMaskFlowCtl=0x0400,
	SCBTriggerIntr=0x0200, SCBMaskAll=0x0100,
	/* The rest are Rx and Tx commands. */
	CUStart=0x0010, CUResume=0x0020, CUStatsAddr=0x0040, CUShowStats=0x0050,
	CUCmdBase=0x0060,	/* CU Base address (set to zero) . */
	CUDumpStats=0x0070,	/* Dump then reset stats counters. */
	RxStart=0x0001, RxResume=0x0002, RxAbort=0x0004, RxAddrLoad=0x0006,
	RxResumeNoResources=0x0007,
};

enum SCBPort_cmds {
	PortReset=0, PortSelfTest=1, PortPartialReset=2, PortDump=3,
};
/* The Speedo3 Rx and Tx frame/buffer descriptors. */
struct descriptor {			/* A generic descriptor. */
	volatile s32 cmd_status;	/* All command and status fields. */
	u32 link;			/* struct descriptor *  */
	unsigned char params[0];
};

/* The Speedo3 Rx and Tx buffer descriptors. */
struct RxFD {				/* Receive frame descriptor. */
	volatile s32 status;
	u32 link;			/* struct RxFD * */
	u32 rx_buf_addr;		/* void * */
	u32 count;
} RxFD_ALIGNMENT;
/* Selected elements of the Tx/RxFD.status word. */
enum RxFD_bits {
	RxComplete=0x8000, RxOK=0x2000,
	RxErrCRC=0x0800, RxErrAlign=0x0400, RxErrTooBig=0x0200, RxErrSymbol=0x0010,
	RxEth2Type=0x0020, RxNoMatch=0x0004, RxNoIAMatch=0x0002,
	TxUnderrun=0x1000, StatusComplete=0x8000,
};
#define CONFIG_DATA_SIZE 22
struct TxFD {				/* Transmit frame descriptor set. */
	s32 status;
	u32 link;			/* void * */
	u32 tx_desc_addr;		/* Always points to the tx_buf_addr element. */
	s32 count;			/* # of TBD (=1), Tx start thresh., etc. */
	/* This constitutes two "TBD" entries -- we only use one. */
#define TX_DESCR_BUF_OFFSET 16
	u32 tx_buf_addr0;		/* void *, frame to be transmitted. */
	s32 tx_buf_size0;		/* Length of Tx frame. */
	u32 tx_buf_addr1;		/* void *, frame to be transmitted. */
	s32 tx_buf_size1;		/* Length of Tx frame. */
	/* the structure must have space for at least CONFIG_DATA_SIZE starting
	 * from tx_desc_addr field */
};
/* Multicast filter setting block.  --SAW */
struct speedo_mc_block {
	struct speedo_mc_block *next;
	unsigned int tx;
	dma_addr_t frame_dma;
	unsigned int len;
	struct descriptor frame __attribute__ ((__aligned__(16)));
};
/* Elements of the dump_statistics block. This block must be lword aligned. */
struct speedo_stats {
	u32 tx_good_frames;
	u32 tx_coll16_errs;
	u32 tx_late_colls;
	u32 tx_underruns;
	u32 tx_lost_carrier;
	u32 tx_deferred;
	u32 tx_one_colls;
	u32 tx_multi_colls;
	u32 tx_total_colls;
	u32 rx_good_frames;
	u32 rx_crc_errs;
	u32 rx_align_errs;
	u32 rx_resource_errs;
	u32 rx_overrun_errs;
	u32 rx_colls_errs;
	u32 rx_runt_errs;
	u32 done_marker;
};

enum Rx_ring_state_bits {
	RrNoMem=1, RrPostponed=2, RrNoResources=4, RrOOMReported=8,
};
/* Do not change the position (alignment) of the first few elements!
   The later elements are grouped for cache locality.

   Unfortunately, all the positions have been shifted since then.
   A new re-alignment is required.  2000/03/06  SAW */
struct speedo_private {
	struct TxFD *tx_ring;			/* Commands (usually CmdTxPacket). */
	struct RxFD *rx_ringp[RX_RING_SIZE];	/* Rx descriptor, used as ring. */
	/* The addresses of a Tx/Rx-in-place packets/buffers. */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	/* Mapped addresses of the rings. */
	dma_addr_t tx_ring_dma;
#define TX_RING_ELEM_DMA(sp, n) ((sp)->tx_ring_dma + (n)*sizeof(struct TxFD))
	dma_addr_t rx_ring_dma[RX_RING_SIZE];
	struct descriptor *last_cmd;		/* Last command sent. */
	unsigned int cur_tx, dirty_tx;		/* The ring entries to be free()ed. */
	spinlock_t lock;			/* Group with Tx control cache line. */
	u32 tx_threshold;			/* The value for txdesc.count. */
	struct RxFD *last_rxf;			/* Last filled RX buffer. */
	dma_addr_t last_rxf_dma;
	unsigned int cur_rx, dirty_rx;		/* The next free ring entry */
	long last_rx_time;			/* Last Rx, in jiffies, to handle Rx hang. */
	struct net_device_stats stats;
	struct speedo_stats *lstats;
	dma_addr_t lstats_dma;
	int chip_id;
	struct pci_dev *pdev;
	struct timer_list timer;		/* Media selection timer. */
	struct speedo_mc_block *mc_setup_head;	/* Multicast setup frame list head. */
	struct speedo_mc_block *mc_setup_tail;	/* Multicast setup frame list tail. */
	long in_interrupt;			/* Word-aligned dev->interrupt */
	unsigned char acpi_pwr;
	signed char rx_mode;			/* Current PROMISC/ALLMULTI setting. */
	unsigned int tx_full:1;			/* The Tx queue is full. */
	unsigned int flow_ctrl:1;		/* Use 802.3x flow control. */
	unsigned int rx_bug:1;			/* Work around receiver hang errata. */
	unsigned char default_port:8;		/* Last dev->if_port value. */
	unsigned char rx_ring_state;		/* RX ring status flags. */
	unsigned short phy[2];			/* PHY media interfaces available. */
	unsigned short partner;			/* Link partner caps. */
	struct mii_if_info mii_if;		/* MII API hooks, info */
	u32 msg_enable;				/* debug message level */
#ifdef CONFIG_PM
	u32 pm_state[16];
#endif
};
/* The parameters for a CmdConfigure operation.
   There are so many options that it would be difficult to document each bit.
   We mostly use the default or recommended settings. */
static const char i82557_config_cmd[CONFIG_DATA_SIZE] = {
	22, 0x08, 0, 0,  0, 0, 0x32, 0x03,  1, /* 1=Use MII  0=Use AUI */
	0, 0x2E, 0,  0x60, 0,
	0xf2, 0x48,   0, 0x40, 0xf2, 0x80,	/* 0x40=Force full-duplex */
	0x3f, 0x05, };
static const char i82558_config_cmd[CONFIG_DATA_SIZE] = {
	22, 0x08, 0, 1,  0, 0, 0x22, 0x03,  1, /* 1=Use MII  0=Use AUI */
	0, 0x2E, 0,  0x60, 0x08, 0x88,
	0x68, 0, 0x40, 0xf2, 0x84,		/* Disable FC */
	0x31, 0x05, };
/* PHY media interface chips. */
static const char *phys[] = {
	"None", "i82553-A/B", "i82553-C", "i82503",
	"DP83840", "80c240", "80c24", "i82555",
	"unknown-8", "unknown-9", "DP83840A", "unknown-11",
	"unknown-12", "unknown-13", "unknown-14", "unknown-15", };
enum phy_chips { NonSuchPhy=0, I82553AB, I82553C, I82503, DP83840, S80C240,
		 S80C24, I82555, DP83840A=10, };
static const char is_mii[] = { 0, 1, 1, 0, 1, 1, 0, 1 };
#define EE_READ_CMD		(6)
static int eepro100_init_one(struct pci_dev *pdev,
		const struct pci_device_id *ent);

static void eepro100_remove_one (struct pci_dev *pdev);

static int do_eeprom_cmd(long ioaddr, int cmd, int cmd_len);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int speedo_open(struct net_device *dev);
static void speedo_resume(struct net_device *dev);
static void speedo_timer(unsigned long data);
static void speedo_init_rx_ring(struct net_device *dev);
static void speedo_tx_timeout(struct net_device *dev);
static int speedo_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void speedo_refill_rx_buffers(struct net_device *dev, int force);
static int speedo_rx(struct net_device *dev);
static void speedo_tx_buffer_gc(struct net_device *dev);
static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static int speedo_close(struct net_device *dev);
static struct net_device_stats *speedo_get_stats(struct net_device *dev);
static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void set_rx_mode(struct net_device *dev);
static void speedo_show_state(struct net_device *dev);
#ifdef honor_default_port
/* Optional driver feature to allow forcing the transceiver setting.
   Not recommended. */
static int mii_ctrl[8] = { 0x3300, 0x3100, 0x0000, 0x0100,
			   0x2000, 0x2100, 0x0400, 0x3100};
#endif
/* How to wait for the command unit to accept a command.
   Typically this takes 0 ticks. */
static inline unsigned char wait_for_cmd_done(struct net_device *dev)
{
	int wait = 1000;
	long cmd_ioaddr = dev->base_addr + SCBCmd;
	unsigned char r;

	do  {
		udelay(1);
		r = inb(cmd_ioaddr);
	} while(r && --wait >= 0);

	if (wait < 0)
		printk(KERN_ALERT "%s: wait_for_cmd_done timeout!\n", dev->name);
	return r;
}
static int __devinit eepro100_init_one (struct pci_dev *pdev,
		const struct pci_device_id *ent)
{
	unsigned long ioaddr;
	int irq;
	int acpi_idle_state = 0, pm;
	static int cards_found /* = 0 */;

#ifndef MODULE
	/* when built-in, we only print version if device is found */
	static int did_version;
	if (did_version++ == 0)
		printk(version);
#endif

	/* save power state before pci_enable_device overwrites it */
	pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm) {
		u16 pwr_command;
		pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pwr_command);
		acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
	}

	/* No resources have been requested yet, so a failure here must not
	   release any regions. */
	if (pci_enable_device(pdev))
		goto err_out_none;

	pci_set_master(pdev);

	if (!request_region(pci_resource_start(pdev, 1),
			pci_resource_len(pdev, 1), "eepro100")) {
		printk (KERN_ERR "eepro100: cannot reserve I/O ports\n");
		goto err_out_none;
	}
	if (!request_mem_region(pci_resource_start(pdev, 0),
			pci_resource_len(pdev, 0), "eepro100")) {
		printk (KERN_ERR "eepro100: cannot reserve MMIO region\n");
		goto err_out_free_pio_region;
	}

	irq = pdev->irq;
#ifdef USE_IO
	ioaddr = pci_resource_start(pdev, 1);
	if (DEBUG & NETIF_MSG_PROBE)
		printk("Found Intel i82557 PCI Speedo at I/O %#lx, IRQ %d.\n",
		       ioaddr, irq);
#else
	ioaddr = (unsigned long)ioremap(pci_resource_start(pdev, 0),
					pci_resource_len(pdev, 0));
	if (!ioaddr) {
		printk (KERN_ERR "eepro100: cannot remap MMIO region %lx @ %lx\n",
			pci_resource_len(pdev, 0), pci_resource_start(pdev, 0));
		goto err_out_free_mmio_region;
	}
	if (DEBUG & NETIF_MSG_PROBE)
		printk("Found Intel i82557 PCI Speedo, MMIO at %#lx, IRQ %d.\n",
		       pci_resource_start(pdev, 0), irq);
#endif

	if (speedo_found1(pdev, ioaddr, cards_found, acpi_idle_state) == 0)
		cards_found++;
	else
		goto err_out_iounmap;

	return 0;

err_out_iounmap: ;
#ifndef USE_IO
	iounmap ((void *)ioaddr);
#endif
err_out_free_mmio_region:
	release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
err_out_free_pio_region:
	release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
err_out_none:
	return -ENODEV;
}
static int __devinit speedo_found1(struct pci_dev *pdev,
		long ioaddr, int card_idx, int acpi_idle_state)
{
	struct net_device *dev;
	struct speedo_private *sp;
	const char *product;
	int i, option;
	u16 eeprom[0x100];
	int size;
	void *tx_ring_space;
	dma_addr_t tx_ring_dma;

	size = TX_RING_SIZE * sizeof(struct TxFD) + sizeof(struct speedo_stats);
	tx_ring_space = pci_alloc_consistent(pdev, size, &tx_ring_dma);
	if (tx_ring_space == NULL)
		return -1;

	dev = init_etherdev(NULL, sizeof(struct speedo_private));
	if (dev == NULL) {
		printk(KERN_ERR "eepro100: Could not allocate ethernet device.\n");
		pci_free_consistent(pdev, size, tx_ring_space, tx_ring_dma);
		return -1;
	}

	SET_MODULE_OWNER(dev);
	if (dev->mem_start > 0)
		option = dev->mem_start;
	else if (card_idx >= 0  &&  options[card_idx] >= 0)
		option = options[card_idx];
	else
		option = 0;

	/* Read the station address EEPROM before doing the reset.
	   Nominally this should even be done before accepting the device, but
	   then we wouldn't have a device name with which to report the error.
	   The size test is for 6 bit vs. 8 bit address serial EEPROMs.
	*/
	{
		unsigned long iobase;
		int read_cmd, ee_size;
		u16 sum;
		int j;

		/* Use IO only to avoid postponed writes and satisfy EEPROM timing
		   requirements. */
		iobase = pci_resource_start(pdev, 1);
		if ((do_eeprom_cmd(iobase, EE_READ_CMD << 24, 27) & 0xffe0000)
			== 0xffe0000) {
			ee_size = 0x100;
			read_cmd = EE_READ_CMD << 24;
		} else {
			ee_size = 0x40;
			read_cmd = EE_READ_CMD << 22;
		}

		for (j = 0, i = 0, sum = 0; i < ee_size; i++) {
			u16 value = do_eeprom_cmd(iobase, read_cmd | (i << 16), 27);
			eeprom[i] = value;
			sum += value;
			if (i < 3) {
				dev->dev_addr[j++] = value;
				dev->dev_addr[j++] = value >> 8;
			}
		}
		if (sum != 0xBABA)
			printk(KERN_WARNING "%s: Invalid EEPROM checksum %#4.4x, "
				"check settings before activating this device!\n",
				dev->name, sum);
		/* Don't  unregister_netdev(dev);  as the EEPro may actually be
		   usable, especially if the MAC address is set later.
		   On the other hand, it may be unusable if MDI data is corrupted. */
	}
	/* Reset the chip: stop Tx and Rx processes and clear counters.
	   This takes less than 10usec and will easily finish before the next
	   action. */
	outl(PortReset, ioaddr + SCBPort);
	inl(ioaddr + SCBPort);
	udelay(10);

	if (eeprom[3] & 0x0100)
		product = "OEM i82557/i82558 10/100 Ethernet";
	else
		product = pdev->dev.name;

	printk(KERN_INFO "%s: %s, ", dev->name, product);

	for (i = 0; i < 5; i++)
		printk("%2.2X:", dev->dev_addr[i]);
	printk("%2.2X, ", dev->dev_addr[i]);
#ifdef USE_IO
	printk("I/O at %#3lx, ", ioaddr);
#endif
	printk("IRQ %d.\n", pdev->irq);
	/* we must initialize base_addr early, for mdio_{read,write} */
	dev->base_addr = ioaddr;

#if 1 || defined(kernel_bloat)
	/* OK, this is pure kernel bloat.  I don't like it when other drivers
	   waste non-pageable kernel space to emit similar messages, but I need
	   them for bug reports. */
	{
		const char *connectors[] = {" RJ45", " BNC", " AUI", " MII"};
		/* The self-test results must be paragraph aligned. */
		volatile s32 *self_test_results;
		int boguscnt = 16000;	/* Timeout for self-test. */
		if ((eeprom[3] & 0x03) != 0x03)
			printk(KERN_INFO "  Receiver lock-up bug exists -- enabling"
				" work-around.\n");
		printk(KERN_INFO "  Board assembly %4.4x%2.2x-%3.3d, Physical"
			" connectors present:",
			eeprom[8], eeprom[9]>>8, eeprom[9] & 0xff);
		for (i = 0; i < 4; i++)
			if (eeprom[5] & (1<<i))
				printk(connectors[i]);
		printk("\n"KERN_INFO"  Primary interface chip %s PHY #%d.\n",
			phys[(eeprom[6]>>8)&15], eeprom[6] & 0x1f);
		if (eeprom[7] & 0x0700)
			printk(KERN_INFO "    Secondary interface chip %s.\n",
				phys[(eeprom[7]>>8)&7]);
		if (((eeprom[6]>>8) & 0x3f) == DP83840
			||  ((eeprom[6]>>8) & 0x3f) == DP83840A) {
			int mdi_reg23 = mdio_read(dev, eeprom[6] & 0x1f, 23) | 0x0422;
			if (congenb)
				mdi_reg23 |= 0x0100;
			printk(KERN_INFO"  DP83840 specific setup, setting register 23 to %4.4x.\n",
				mdi_reg23);
			mdio_write(dev, eeprom[6] & 0x1f, 23, mdi_reg23);
		}
		if ((option >= 0) && (option & 0x70)) {
			printk(KERN_INFO "  Forcing %dMbs %s-duplex operation.\n",
				(option & 0x20 ? 100 : 10),
				(option & 0x10 ? "full" : "half"));
			mdio_write(dev, eeprom[6] & 0x1f, MII_BMCR,
				((option & 0x20) ? 0x2000 : 0) |	/* 100mbps? */
				((option & 0x10) ? 0x0100 : 0));	/* Full duplex? */
		}

		/* Perform a system self-test. */
		self_test_results = (s32*) ((((long) tx_ring_space) + 15) & ~0xf);
		self_test_results[0] = 0;
		self_test_results[1] = -1;
		outl(tx_ring_dma | PortSelfTest, ioaddr + SCBPort);
		do {
			udelay(10);
		} while (self_test_results[1] == -1  &&  --boguscnt >= 0);

		if (boguscnt < 0) {		/* Test optimized out. */
			printk(KERN_ERR "Self test failed, status %8.8x:\n"
				KERN_ERR " Failure to initialize the i82557.\n"
				KERN_ERR " Verify that the card is in a bus-master"
				" capable slot.\n",
				self_test_results[1]);
		} else
			printk(KERN_INFO "  General self-test: %s.\n"
				KERN_INFO "  Serial sub-system self-test: %s.\n"
				KERN_INFO "  Internal registers self-test: %s.\n"
				KERN_INFO "  ROM checksum self-test: %s (%#8.8x).\n",
				self_test_results[1] & 0x1000 ? "failed" : "passed",
				self_test_results[1] & 0x0020 ? "failed" : "passed",
				self_test_results[1] & 0x0008 ? "failed" : "passed",
				self_test_results[1] & 0x0004 ? "failed" : "passed",
				self_test_results[0]);
	}
#endif  /* kernel_bloat */
	outl(PortReset, ioaddr + SCBPort);
	inl(ioaddr + SCBPort);
	udelay(10);

	/* Return the chip to its original power state. */
	pci_set_power_state(pdev, acpi_idle_state);

	pci_set_drvdata (pdev, dev);

	dev->irq = pdev->irq;

	sp = dev->priv;
	sp->pdev = pdev;
	sp->msg_enable = DEBUG;
	sp->acpi_pwr = acpi_idle_state;
	sp->tx_ring = tx_ring_space;
	sp->tx_ring_dma = tx_ring_dma;
	sp->lstats = (struct speedo_stats *)(sp->tx_ring + TX_RING_SIZE);
	sp->lstats_dma = TX_RING_ELEM_DMA(sp, TX_RING_SIZE);
	init_timer(&sp->timer); /* used in ioctl() */

	sp->mii_if.full_duplex = option >= 0 && (option & 0x10) ? 1 : 0;
	if (card_idx >= 0) {
		if (full_duplex[card_idx] >= 0)
			sp->mii_if.full_duplex = full_duplex[card_idx];
	}
	sp->default_port = option >= 0 ? (option & 0x0f) : 0;

	sp->phy[0] = eeprom[6];
	sp->phy[1] = eeprom[7];

	sp->mii_if.phy_id = eeprom[6] & 0x1f;
	sp->mii_if.phy_id_mask = 0x1f;
	sp->mii_if.reg_num_mask = 0x1f;
	sp->mii_if.dev = dev;
	sp->mii_if.mdio_read = mdio_read;
	sp->mii_if.mdio_write = mdio_write;

	sp->rx_bug = (eeprom[3] & 0x03) == 3 ? 0 : 1;
	if ((pdev->device > 0x1030 && pdev->device < 0x103F)
	    || (pdev->device == 0x2449) || (pdev->device == 0x2459)
	    || (pdev->device == 0x245D)) {
		sp->chip_id = 1;
	}

	if (sp->rx_bug)
		printk(KERN_INFO "  Receiver lock-up workaround activated.\n");

	/* The Speedo-specific entries in the device structure. */
	dev->open = &speedo_open;
	dev->hard_start_xmit = &speedo_start_xmit;
	netif_set_tx_timeout(dev, &speedo_tx_timeout, TX_TIMEOUT);
	dev->stop = &speedo_close;
	dev->get_stats = &speedo_get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &speedo_ioctl;

	return 0;
}
static void do_slow_command(struct net_device *dev, int cmd)
{
	long cmd_ioaddr = dev->base_addr + SCBCmd;
	int wait = 0;
	do
		if (inb(cmd_ioaddr) == 0) break;
	while(++wait <= 200);
	if (wait > 100)
		printk(KERN_ERR "Command %4.4x never accepted (%d polls)!\n",
		       inb(cmd_ioaddr), wait);

	outb(cmd, cmd_ioaddr);

	for (wait = 0; wait <= 100; wait++)
		if (inb(cmd_ioaddr) == 0) return;
	for (; wait <= 20000; wait++)
		if (inb(cmd_ioaddr) == 0) return;
		else udelay(1);
	printk(KERN_ERR "Command %4.4x was not accepted after %d polls!"
	       "  Current status %8.8x.\n",
	       cmd, wait, inl(dev->base_addr + SCBStatus));
}
/* Serial EEPROM section.
   A "bit" grungy, but we work our way through bit-by-bit :->. */
/*  EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK	0x01	/* EEPROM shift clock. */
#define EE_CS		0x02	/* EEPROM chip select. */
#define EE_DATA_WRITE	0x04	/* EEPROM chip data in. */
#define EE_DATA_READ	0x08	/* EEPROM chip data out. */
#define EE_ENB		(0x4800 | EE_CS)
#define EE_WRITE_0	0x4802
#define EE_WRITE_1	0x4806
#define EE_OFFSET	SCBeeprom

/* The fixes for the code were kindly provided by Dragan Stancevic
   <visitor@valinux.com> to strictly follow Intel specifications of EEPROM
   access timing.
   The publicly available sheet 64486302 (sec. 3.1) specifies 1us access
   interval for serial EEPROM.  However, it looks like there is an
   additional requirement dictating larger udelays in the code below.
   2000/05/24  SAW */
static int __devinit do_eeprom_cmd(long ioaddr, int cmd, int cmd_len)
{
	unsigned retval = 0;
	long ee_addr = ioaddr + SCBeeprom;

	io_outw(EE_ENB, ee_addr); udelay(2);
	io_outw(EE_ENB | EE_SHIFT_CLK, ee_addr); udelay(2);

	/* Shift the command bits out. */
	do {
		short dataval = (cmd & (1 << cmd_len)) ? EE_WRITE_1 : EE_WRITE_0;
		io_outw(dataval, ee_addr); udelay(2);
		io_outw(dataval | EE_SHIFT_CLK, ee_addr); udelay(2);
		retval = (retval << 1) | ((io_inw(ee_addr) & EE_DATA_READ) ? 1 : 0);
	} while (--cmd_len >= 0);
	io_outw(EE_ENB, ee_addr); udelay(2);

	/* Terminate the EEPROM access. */
	io_outw(EE_ENB & ~EE_CS, ee_addr);
	return retval;
}
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	long ioaddr = dev->base_addr;
	int val, boguscnt = 64*10;	/* <64 usec. to complete, typ 27 ticks */
	outl(0x08000000 | (location<<16) | (phy_id<<21), ioaddr + SCBCtrlMDI);
	do {
		val = inl(ioaddr + SCBCtrlMDI);
		if (--boguscnt < 0) {
			printk(KERN_ERR " mdio_read() timed out with val = %8.8x.\n", val);
			break;
		}
	} while (! (val & 0x10000000));
	return val & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	long ioaddr = dev->base_addr;
	int val, boguscnt = 64*10;	/* <64 usec. to complete, typ 27 ticks */
	outl(0x04000000 | (location<<16) | (phy_id<<21) | value,
	     ioaddr + SCBCtrlMDI);
	do {
		val = inl(ioaddr + SCBCtrlMDI);
		if (--boguscnt < 0) {
			printk(KERN_ERR" mdio_write() timed out with val = %8.8x.\n", val);
			break;
		}
	} while (! (val & 0x10000000));
}
static int
speedo_open(struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int retval;

	if (netif_msg_ifup(sp))
		printk(KERN_DEBUG "%s: speedo_open() irq %d.\n", dev->name, dev->irq);

	pci_set_power_state(sp->pdev, 0);

	/* Set up the Tx queue early.. */
	sp->cur_tx = 0;
	sp->dirty_tx = 0;
	sp->last_cmd = 0;
	sp->tx_full = 0;
	spin_lock_init(&sp->lock);
	sp->in_interrupt = 0;

	/* .. we can safely take handler calls during init. */
	retval = request_irq(dev->irq, &speedo_interrupt, SA_SHIRQ, dev->name, dev);
	if (retval) {
		return retval;
	}

	dev->if_port = sp->default_port;

#ifdef oh_no_you_dont_unless_you_honour_the_options_passed_in_to_us
	/* Retrigger negotiation to reset previous errors. */
	if ((sp->phy[0] & 0x8000) == 0) {
		int phy_addr = sp->phy[0] & 0x1f ;
		/* Use 0x3300 for restarting NWay, other values to force xcvr:
		   0x0000 10-HD
		   0x0100 10-FD
		   0x2000 100-HD
		   0x2100 100-FD
		*/
#ifdef honor_default_port
		mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
#else
		mdio_write(dev, phy_addr, MII_BMCR, 0x3300);
#endif
	}
#endif

	speedo_init_rx_ring(dev);

	/* Fire up the hardware. */
	outw(SCBMaskAll, ioaddr + SCBCmd);
	speedo_resume(dev);

	netdevice_start(dev);
	netif_start_queue(dev);

	/* Setup the chip and configure the multicast list. */
	sp->mc_setup_head = NULL;
	sp->mc_setup_tail = NULL;
	sp->flow_ctrl = sp->partner = 0;
	sp->rx_mode = -1;	/* Invalid -> always reset the mode. */
	set_rx_mode(dev);
	if ((sp->phy[0] & 0x8000) == 0)
		sp->mii_if.advertising = mdio_read(dev, sp->phy[0] & 0x1f, MII_ADVERTISE);

	mii_check_link(&sp->mii_if);

	if (netif_msg_ifup(sp)) {
		printk(KERN_DEBUG "%s: Done speedo_open(), status %8.8x.\n",
		       dev->name, inw(ioaddr + SCBStatus));
	}

	/* Set the timer.  The timer serves a dual purpose:
	   1) to monitor the media interface (e.g. link beat) and perhaps switch
	   to an alternate media type
	   2) to monitor Rx activity, and restart the Rx process if the receiver
	   hangs. */
	sp->timer.expires = RUN_AT((24*HZ)/10);	/* 2.4 sec. */
	sp->timer.data = (unsigned long)dev;
	sp->timer.function = &speedo_timer;	/* timer handler */
	add_timer(&sp->timer);

	/* No need to wait for the command unit to accept here. */
	if ((sp->phy[0] & 0x8000) == 0)
		mdio_read(dev, sp->phy[0] & 0x1f, MII_BMCR);

	return 0;
}
/* Start the chip hardware after a full reset. */
static void speedo_resume(struct net_device *dev)
{
	struct speedo_private *sp = dev->priv;
	long ioaddr = dev->base_addr;

	/* Start with a Tx threshold of 256 (0x..20.... 8 byte units). */
	sp->tx_threshold = 0x01208000;

	/* Set the segment registers to '0'. */
	if (wait_for_cmd_done(dev) != 0) {
		outl(PortPartialReset, ioaddr + SCBPort);
		udelay(10);
	}

	outl(0, ioaddr + SCBPointer);
	inl(ioaddr + SCBPointer);	/* Flush to PCI. */
	udelay(10);			/* Bogus, but it avoids the bug. */

	/* Note: these next two operations can take a while. */
	do_slow_command(dev, RxAddrLoad);
	do_slow_command(dev, CUCmdBase);

	/* Load the statistics block and rx ring addresses. */
	outl(sp->lstats_dma, ioaddr + SCBPointer);
	inl(ioaddr + SCBPointer);	/* Flush to PCI */

	outb(CUStatsAddr, ioaddr + SCBCmd);
	sp->lstats->done_marker = 0;
	wait_for_cmd_done(dev);

	if (sp->rx_ringp[sp->cur_rx % RX_RING_SIZE] == NULL) {
		if (netif_msg_rx_err(sp))
			printk(KERN_DEBUG "%s: NULL cur_rx in speedo_resume().\n",
			       dev->name);
	} else {
		outl(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
		     ioaddr + SCBPointer);
		inl(ioaddr + SCBPointer);	/* Flush to PCI */
	}

	/* Note: RxStart should complete instantly. */
	do_slow_command(dev, RxStart);
	do_slow_command(dev, CUDumpStats);

	/* Fill the first command with our physical address. */
	{
		struct descriptor *ias_cmd;

		ias_cmd =
			(struct descriptor *)&sp->tx_ring[sp->cur_tx++ % TX_RING_SIZE];
		/* Avoid a bug(?!) here by marking the command already completed. */
		ias_cmd->cmd_status = cpu_to_le32((CmdSuspend | CmdIASetup) | 0xa000);
		ias_cmd->link =
			cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
		memcpy(ias_cmd->params, dev->dev_addr, 6);
		if (sp->last_cmd)
			clear_suspend(sp->last_cmd);
		sp->last_cmd = ias_cmd;
	}

	/* Start the chip's Tx process and unmask interrupts. */
	outl(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
	     ioaddr + SCBPointer);
	/* We are not ACK-ing FCP and ER in the interrupt handler yet so they should
	   remain masked --Dragan */
	outw(CUStart | SCBMaskEarlyRx | SCBMaskFlowCtl, ioaddr + SCBCmd);
}
/*
 * Sometimes the receiver stops making progress.  This routine knows how to
 * get it going again, without losing packets or being otherwise nasty like
 * a chip reset would be.  Previously the driver had a whole sequence
 * of if RxSuspended, if it's no buffers do one thing, if it's no resources,
 * do another, etc.  But those things don't really matter.  Separate logic
 * in the ISR provides for allocating buffers--the other half of operation
 * is just making sure the receiver is active.  speedo_rx_soft_reset does that.
 * The problem with the old, more involved algorithm showed up under
 * ping floods on the order of 60K packets/second on a 100Mbps fdx network.
 */
static void
speedo_rx_soft_reset(struct net_device *dev)
{
	struct speedo_private *sp = dev->priv;
	struct RxFD *rfd;
	long ioaddr;

	ioaddr = dev->base_addr;
	if (wait_for_cmd_done(dev) != 0) {
		printk("%s: previous command stalled\n", dev->name);
		return;
	}
	/*
	 * Put the hardware into a known state.
	 */
	outb(RxAbort, ioaddr + SCBCmd);

	rfd = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];

	rfd->rx_buf_addr = 0xffffffff;

	if (wait_for_cmd_done(dev) != 0) {
		printk("%s: RxAbort command stalled\n", dev->name);
		return;
	}
	outl(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
	     ioaddr + SCBPointer);
	outb(RxStart, ioaddr + SCBCmd);
}
/* Media monitoring and control. */
static void speedo_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int phy_num = sp->phy[0] & 0x1f;

	/* We have MII and lost link beat. */
	if ((sp->phy[0] & 0x8000) == 0) {
		int partner = mdio_read(dev, phy_num, MII_LPA);
		if (partner != sp->partner) {
			int flow_ctrl = sp->mii_if.advertising & partner & 0x0400 ? 1 : 0;
			if (netif_msg_link(sp)) {
				printk(KERN_DEBUG "%s: Link status change.\n", dev->name);
				printk(KERN_DEBUG "%s: Old partner %x, new %x, adv %x.\n",
				       dev->name, sp->partner, partner, sp->mii_if.advertising);
			}
			sp->partner = partner;
			if (flow_ctrl != sp->flow_ctrl) {
				sp->flow_ctrl = flow_ctrl;
				sp->rx_mode = -1;	/* Trigger a reload. */
			}
		}
	}
	mii_check_link(&sp->mii_if);
	if (netif_msg_timer(sp)) {
		printk(KERN_DEBUG "%s: Media control tick, status %4.4x.\n",
		       dev->name, inw(ioaddr + SCBStatus));
	}
	if (sp->rx_mode < 0  ||
	    (sp->rx_bug  && jiffies - sp->last_rx_time > 2*HZ)) {
		/* We haven't received a packet in a Long Time.  We might have been
		   bitten by the receiver hang bug.  This can be cleared by sending
		   a set multicast list command. */
		if (netif_msg_timer(sp))
			printk(KERN_DEBUG "%s: Sending a multicast list set command"
			       " from a timer routine,"
			       " m=%d, j=%ld, l=%ld.\n",
			       dev->name, sp->rx_mode, jiffies, sp->last_rx_time);
		set_rx_mode(dev);
	}
	/* We must continue to monitor the media. */
	sp->timer.expires = RUN_AT(2*HZ);	/* 2.0 sec. */
	add_timer(&sp->timer);
}
static void speedo_show_state(struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	int i;

	if (netif_msg_pktdata(sp)) {
		printk(KERN_DEBUG "%s: Tx ring dump,  Tx queue %u / %u:\n",
		       dev->name, sp->cur_tx, sp->dirty_tx);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_DEBUG "%s:  %c%c%2d %8.8x.\n", dev->name,
			       i == sp->dirty_tx % TX_RING_SIZE ? '*' : ' ',
			       i == sp->cur_tx % TX_RING_SIZE ? '=' : ' ',
			       i, sp->tx_ring[i].status);

		printk(KERN_DEBUG "%s: Printing Rx ring"
		       " (next to receive into %u, dirty index %u).\n",
		       dev->name, sp->cur_rx, sp->dirty_rx);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(KERN_DEBUG "%s: %c%c%c%2d %8.8x.\n", dev->name,
			       sp->rx_ringp[i] == sp->last_rxf ? 'l' : ' ',
			       i == sp->dirty_rx % RX_RING_SIZE ? '*' : ' ',
			       i == sp->cur_rx % RX_RING_SIZE ? '=' : ' ',
			       i, (sp->rx_ringp[i] != NULL) ?
			       (unsigned)sp->rx_ringp[i]->status : 0);
	}

#if 0
	{
		long ioaddr = dev->base_addr;
		int phy_num = sp->phy[0] & 0x1f;
		for (i = 0; i < 16; i++) {
			/* FIXME: what does it mean?  --SAW */
			if (i == 6) i = 21;
			printk(KERN_DEBUG "%s:  PHY index %d register %d is %4.4x.\n",
			       dev->name, phy_num, i, mdio_read(dev, phy_num, i));
		}
	}
#endif
}
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void
speedo_init_rx_ring(struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	struct RxFD *rxf, *last_rxf = NULL;
	dma_addr_t last_rxf_dma = 0 /* to shut up the compiler */;
	int i;

	sp->cur_rx = 0;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
		sp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;		/* OK.  Just initially short of Rx bufs. */
		/* Check for NULL before touching the skb (was flagged --hch). */
		rx_align(skb);		/* Align IP on 16 byte boundary */
		skb->dev = dev;		/* Mark as being used by this device. */
		rxf = (struct RxFD *)skb->tail;
		sp->rx_ringp[i] = rxf;
		sp->rx_ring_dma[i] =
			pci_map_single(sp->pdev, rxf,
				       PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_BIDIRECTIONAL);
		skb_reserve(skb, sizeof(struct RxFD));
		if (last_rxf) {
			last_rxf->link = cpu_to_le32(sp->rx_ring_dma[i]);
			pci_dma_sync_single(sp->pdev, last_rxf_dma,
					    sizeof(struct RxFD), PCI_DMA_TODEVICE);
		}
		last_rxf = rxf;
		last_rxf_dma = sp->rx_ring_dma[i];
		rxf->status = cpu_to_le32(0x00000001);	/* '1' is flag value only. */
		rxf->link = 0;				/* None yet. */
		/* This field unused by i82557. */
		rxf->rx_buf_addr = 0xffffffff;
		rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
		pci_dma_sync_single(sp->pdev, sp->rx_ring_dma[i],
				    sizeof(struct RxFD), PCI_DMA_TODEVICE);
	}
	sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
	/* Mark the last entry as end-of-list. */
	last_rxf->status = cpu_to_le32(0xC0000002);	/* '2' is flag value only. */
	pci_dma_sync_single(sp->pdev, sp->rx_ring_dma[RX_RING_SIZE-1],
			    sizeof(struct RxFD), PCI_DMA_TODEVICE);
	sp->last_rxf = last_rxf;
	sp->last_rxf_dma = last_rxf_dma;
}
static void speedo_purge_tx(struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	int entry;

	while ((int)(sp->cur_tx - sp->dirty_tx) > 0) {
		entry = sp->dirty_tx % TX_RING_SIZE;
		if (sp->tx_skbuff[entry]) {
			sp->stats.tx_errors++;
			pci_unmap_single(sp->pdev,
					 le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
					 sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(sp->tx_skbuff[entry]);
			sp->tx_skbuff[entry] = 0;
		}
		sp->dirty_tx++;
	}
	while (sp->mc_setup_head != NULL) {
		struct speedo_mc_block *t;
		if (netif_msg_tx_err(sp))
			printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
		pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
				 sp->mc_setup_head->len, PCI_DMA_TODEVICE);
		t = sp->mc_setup_head->next;
		kfree(sp->mc_setup_head);
		sp->mc_setup_head = t;
	}
	sp->mc_setup_tail = NULL;
	sp->tx_full = 0;
	netif_wake_queue(dev);
}
static void reset_mii(struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;

	/* Reset the MII transceiver, suggested by Fred Young @ scalable.com. */
	if ((sp->phy[0] & 0x8000) == 0) {
		int phy_addr = sp->phy[0] & 0x1f;
		int advertising = mdio_read(dev, phy_addr, MII_ADVERTISE);
		int mii_bmcr = mdio_read(dev, phy_addr, MII_BMCR);
		mdio_write(dev, phy_addr, MII_BMCR, 0x0400);
		mdio_write(dev, phy_addr, MII_BMSR, 0x0000);
		mdio_write(dev, phy_addr, MII_ADVERTISE, 0x0000);
		mdio_write(dev, phy_addr, MII_BMCR, 0x8000);
#ifdef honor_default_port
		mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
#else
		mdio_read(dev, phy_addr, MII_BMCR);
		mdio_write(dev, phy_addr, MII_BMCR, mii_bmcr);
		mdio_write(dev, phy_addr, MII_ADVERTISE, advertising);
#endif
	}
}
static void speedo_tx_timeout(struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int status = inw(ioaddr + SCBStatus);
	unsigned long flags;

	if (netif_msg_tx_err(sp)) {
		printk(KERN_WARNING "%s: Transmit timed out: status %4.4x "
		       " %4.4x  at %d/%d command %8.8x.\n",
		       dev->name, status, inw(ioaddr + SCBCmd),
		       sp->dirty_tx, sp->cur_tx,
		       sp->tx_ring[sp->dirty_tx % TX_RING_SIZE].status);
	}
	speedo_show_state(dev);
#if 0
	if ((status & 0x00C0) != 0x0080
	    &&  (status & 0x003C) == 0x0010) {
		/* Only the command unit has stopped. */
		printk(KERN_WARNING "%s: Trying to restart the transmitter...\n",
		       dev->name);
		outl(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
		     ioaddr + SCBPointer);
		outw(CUStart, ioaddr + SCBCmd);
		reset_mii(dev);
	} else {
#else
	{
#endif
		del_timer_sync(&sp->timer);
		/* Reset the Tx and Rx units. */
		outl(PortReset, ioaddr + SCBPort);
		/* We may get spurious interrupts here.  But I don't think that they
		   may do much harm.  1999/12/09 SAW */
		udelay(10);
		/* Disable interrupts. */
		outw(SCBMaskAll, ioaddr + SCBCmd);
		synchronize_irq(dev->irq);
		speedo_tx_buffer_gc(dev);
		/* Free as much as possible.
		   It helps to recover from a hang because of out-of-memory.
		   It also simplifies speedo_resume() in case the TX ring is full or
		   close to full. */
		speedo_purge_tx(dev);
		speedo_refill_rx_buffers(dev, 1);
		spin_lock_irqsave(&sp->lock, flags);
		speedo_resume(dev);
		sp->rx_mode = -1;
		dev->trans_start = jiffies;
		spin_unlock_irqrestore(&sp->lock, flags);
		set_rx_mode(dev); /* it takes the spinlock itself --SAW */
		/* Reset MII transceiver.  Do it before starting the timer to serialize
		   mdio_xxx operations.  Yes, it's a paranoia :-)  2000/05/09 SAW */
		reset_mii(dev);
		sp->timer.expires = RUN_AT(2*HZ);
		add_timer(&sp->timer);
	}
	return;
}
static int
speedo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int entry;

	/* Prevent interrupts from changing the Tx ring from underneath us. */
	unsigned long flags;

	spin_lock_irqsave(&sp->lock, flags);

	/* Check if there is enough space. */
	if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
		printk(KERN_ERR "%s: incorrect tbusy state, fixed.\n", dev->name);
		netif_stop_queue(dev);
		sp->tx_full = 1;
		spin_unlock_irqrestore(&sp->lock, flags);
		return 1;
	}

	/* Calculate the Tx descriptor entry. */
	entry = sp->cur_tx++ % TX_RING_SIZE;

	sp->tx_skbuff[entry] = skb;
	sp->tx_ring[entry].status =
		cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
	if (!(entry & ((TX_RING_SIZE>>2)-1)))
		sp->tx_ring[entry].status |= cpu_to_le32(CmdIntr);
	sp->tx_ring[entry].link =
		cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
	sp->tx_ring[entry].tx_desc_addr =
		cpu_to_le32(TX_RING_ELEM_DMA(sp, entry) + TX_DESCR_BUF_OFFSET);
	/* The data region is always in one buffer descriptor. */
	sp->tx_ring[entry].count = cpu_to_le32(sp->tx_threshold);
	sp->tx_ring[entry].tx_buf_addr0 =
		cpu_to_le32(pci_map_single(sp->pdev, skb->data,
					   skb->len, PCI_DMA_TODEVICE));
	sp->tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb->len);

	/* workaround for hardware bug on 10 mbit half duplex */

	if ((sp->partner == 0) && (sp->chip_id == 1)) {
		wait_for_cmd_done(dev);
		outb(0, ioaddr + SCBCmd);
		udelay(1);
	}

	/* Trigger the command unit resume. */
	wait_for_cmd_done(dev);
	clear_suspend(sp->last_cmd);
	/* We want the time window between clearing suspend flag on the previous
	   command and resuming CU to be as small as possible.
	   Interrupts in between are very undesired.  --SAW */
	outb(CUResume, ioaddr + SCBCmd);
	sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];

	/* Leave room for set_rx_mode(). If there is no more space than reserved
	   for multicast filter mark the ring as full. */
	if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
		netif_stop_queue(dev);
		sp->tx_full = 1;
	}

	spin_unlock_irqrestore(&sp->lock, flags);

	dev->trans_start = jiffies;

	return 0;
}
static void speedo_tx_buffer_gc(struct net_device *dev)
{
	unsigned int dirty_tx;
	struct speedo_private *sp = (struct speedo_private *)dev->priv;

	dirty_tx = sp->dirty_tx;
	while ((int)(sp->cur_tx - dirty_tx) > 0) {
		int entry = dirty_tx % TX_RING_SIZE;
		int status = le32_to_cpu(sp->tx_ring[entry].status);

		if (netif_msg_tx_done(sp))
			printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n",
			       entry, status);
		if ((status & StatusComplete) == 0)
			break;		/* It still hasn't been processed. */
		if (status & TxUnderrun)
			if (sp->tx_threshold < 0x01e08000) {
				if (netif_msg_tx_err(sp))
					printk(KERN_DEBUG "%s: TX underrun, threshold adjusted.\n",
					       dev->name);
				sp->tx_threshold += 0x00040000;
			}
		/* Free the original skb. */
		if (sp->tx_skbuff[entry]) {
			sp->stats.tx_packets++;	/* Count only user packets. */
			sp->stats.tx_bytes += sp->tx_skbuff[entry]->len;
			pci_unmap_single(sp->pdev,
					 le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
					 sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(sp->tx_skbuff[entry]);
			sp->tx_skbuff[entry] = 0;
		}
		dirty_tx++;
	}

	if (netif_msg_tx_err(sp) && (int)(sp->cur_tx - dirty_tx) > TX_RING_SIZE) {
		printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d,"
		       " full=%d.\n",
		       dirty_tx, sp->cur_tx, sp->tx_full);
		dirty_tx += TX_RING_SIZE;
	}

	while (sp->mc_setup_head != NULL
	       && (int)(dirty_tx - sp->mc_setup_head->tx - 1) > 0) {
		struct speedo_mc_block *t;
		if (netif_msg_tx_err(sp))
			printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
		pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
				 sp->mc_setup_head->len, PCI_DMA_TODEVICE);
		t = sp->mc_setup_head->next;
		kfree(sp->mc_setup_head);
		sp->mc_setup_head = t;
	}
	if (sp->mc_setup_head == NULL)
		sp->mc_setup_tail = NULL;

	sp->dirty_tx = dirty_tx;
}
1558 /* The interrupt handler does all of the Rx thread work and cleans up
1559 after the Tx thread. */
1560 static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1562 struct net_device *dev = (struct net_device *)dev_instance;
1563 struct speedo_private *sp;
1564 long ioaddr, boguscnt = max_interrupt_work;
1565 unsigned short status;
1567 ioaddr = dev->base_addr;
1568 sp = (struct speedo_private *)dev->priv;
1570 #ifndef final_version
1571 /* A lock to prevent simultaneous entry on SMP machines. */
1572 if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
1573 printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
1574 dev->name);
1575 sp->in_interrupt = 0; /* Avoid halting machine. */
1576 return;
1578 #endif
1580 do {
1581 status = inw(ioaddr + SCBStatus);
1582 /* Acknowledge all of the current interrupt sources ASAP. */
1583 /* Will change from 0xfc00 to 0xff00 when we start handling
1584 FCP and ER interrupts --Dragan */
1585 outw(status & 0xfc00, ioaddr + SCBStatus);
1587 if (netif_msg_intr(sp))
1588 printk(KERN_DEBUG "%s: interrupt status=%#4.4x.\n",
1589 dev->name, status);
1591 if ((status & 0xfc00) == 0)
1592 break;
1595 if ((status & 0x5000) || /* Packet received, or Rx error. */
1596 (sp->rx_ring_state&(RrNoMem|RrPostponed)) == RrPostponed)
1597 /* Need to gather the postponed packet. */
1598 speedo_rx(dev);
1600 /* Always check if all rx buffers are allocated. --SAW */
1601 speedo_refill_rx_buffers(dev, 0);
1603 spin_lock(&sp->lock);
1604 /*
1605 * The chip may have suspended reception for various reasons.
1606 * Check for that, and re-prime it should this be the case.
1607 */
1608 switch ((status >> 2) & 0xf) {
1609 case 0: /* Idle */
1610 break;
1611 case 1: /* Suspended */
1612 case 2: /* No resources (RxFDs) */
1613 case 9: /* Suspended with no more RBDs */
1614 case 10: /* No resources due to no RBDs */
1615 case 12: /* Ready with no RBDs */
1616 speedo_rx_soft_reset(dev);
1617 break;
1618 case 3: case 5: case 6: case 7: case 8:
1619 case 11: case 13: case 14: case 15:
1620 /* these are all reserved values */
1621 break;
1625 /* User interrupt, Command/Tx unit interrupt or CU not active. */
1626 if (status & 0xA400) {
1627 speedo_tx_buffer_gc(dev);
1628 if (sp->tx_full
1629 && (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL) {
1630 /* The ring is no longer full. */
1631 sp->tx_full = 0;
1632 netif_wake_queue(dev); /* Attention: under a spinlock. --SAW */
1636 spin_unlock(&sp->lock);
1638 if (--boguscnt < 0) {
1639 printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
1640 dev->name, status);
1641 /* Clear all interrupt sources. */
1642 /* Will change from 0xfc00 to 0xff00 when we start handling
1643 FCP and ER interrupts --Dragan */
1644 outw(0xfc00, ioaddr + SCBStatus);
1645 break;
1647 } while (1);
1649 if (netif_msg_intr(sp))
1650 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1651 dev->name, inw(ioaddr + SCBStatus));
1653 clear_bit(0, (void*)&sp->in_interrupt);
1654 return;
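/* Editorial note: Tx queue flow control in this driver is hysteretic.
   speedo_start_xmit() stops the queue once occupancy reaches
   TX_QUEUE_LIMIT, while the wake in the handler above waits for it to
   drop below TX_QUEUE_UNFULL; the gap between the two watermarks avoids
   netif_stop_queue()/netif_wake_queue() ping-pong near a full ring: */
#if 0
	if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT)
		netif_stop_queue(dev);			/* high watermark */
	/* ... after speedo_tx_buffer_gc() frees some slots ... */
	if (sp->tx_full &&
	    (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL)
		netif_wake_queue(dev);			/* lower watermark */
#endif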
1657 static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry)
1659 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1660 struct RxFD *rxf;
1661 struct sk_buff *skb;
1662 /* Get a fresh skbuff to replace the consumed one. */
1663 skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
1664 sp->rx_skbuff[entry] = skb;
1665 if (skb == NULL) {
1666 sp->rx_ringp[entry] = NULL;
1667 return NULL;
1668 }
1669 /* Align only after the NULL check: rx_align() dereferences the skb. --hch */
1670 rx_align(skb); /* Align IP header */
1671 rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
1672 sp->rx_ring_dma[entry] =
1673 pci_map_single(sp->pdev, rxf,
1674 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1675 skb->dev = dev;
1676 skb_reserve(skb, sizeof(struct RxFD));
1677 rxf->rx_buf_addr = 0xffffffff;
1678 pci_dma_sync_single(sp->pdev, sp->rx_ring_dma[entry],
1679 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1680 return rxf;
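/* Editorial note: speedo_rx_alloc() above places the RxFD descriptor at
   the head of the skb data area, then skb_reserve() hides it, so the chip
   DMAs each frame in directly behind its own descriptor and a single
   pci_map_single() covers both. Resulting layout (the 2-byte pad exists
   only on platforms where rx_align() is non-empty):

	skb->head
	|<- 2-byte pad ->|<- struct RxFD ->|<- PKT_BUF_SZ frame area ->|
	                 ^ rxf, DMA base    ^ skb->data after skb_reserve()
*/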
1683 static inline void speedo_rx_link(struct net_device *dev, int entry,
1684 struct RxFD *rxf, dma_addr_t rxf_dma)
1686 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1687 rxf->status = cpu_to_le32(0xC0000001); /* '1' for driver use only. */
1688 rxf->link = 0; /* None yet. */
1689 rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
1690 sp->last_rxf->link = cpu_to_le32(rxf_dma);
1691 sp->last_rxf->status &= cpu_to_le32(~0xC0000000);
1692 pci_dma_sync_single(sp->pdev, sp->last_rxf_dma,
1693 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1694 sp->last_rxf = rxf;
1695 sp->last_rxf_dma = rxf_dma;
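/* Editorial note: speedo_rx_link() above maintains the invariant that
   exactly one RxFD carries the end-of-list marking. Assuming the usual
   i8255x RFD command bits (EL = 0x80000000, S = 0x40000000; the low '1'
   is the driver-private flag noted above), the handoff is: */
#if 0
	rxf->status = cpu_to_le32(EL | S | 1);		/* new tail stops the RU */
	sp->last_rxf->link = cpu_to_le32(rxf_dma);	/* chain it in */
	sp->last_rxf->status &= cpu_to_le32(~(EL | S));	/* reopen the old tail */
	/* the sync on last_rxf_dma pushes the reopened status to the
	   device before the receive unit can reach it */
#endif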
1698 static int speedo_refill_rx_buf(struct net_device *dev, int force)
1700 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1701 int entry;
1702 struct RxFD *rxf;
1704 entry = sp->dirty_rx % RX_RING_SIZE;
1705 if (sp->rx_skbuff[entry] == NULL) {
1706 rxf = speedo_rx_alloc(dev, entry);
1707 if (rxf == NULL) {
1708 unsigned int forw;
1709 int forw_entry;
1710 if (netif_msg_rx_err(sp) || !(sp->rx_ring_state & RrOOMReported)) {
1711 printk(KERN_WARNING "%s: can't fill rx buffer (force %d)!\n",
1712 dev->name, force);
1713 sp->rx_ring_state |= RrOOMReported;
1715 speedo_show_state(dev);
1716 if (!force)
1717 return -1; /* Better luck next time! */
1718 /* Borrow an skb from one of next entries. */
1719 for (forw = sp->dirty_rx + 1; forw != sp->cur_rx; forw++)
1720 if (sp->rx_skbuff[forw % RX_RING_SIZE] != NULL)
1721 break;
1722 if (forw == sp->cur_rx)
1723 return -1;
1724 forw_entry = forw % RX_RING_SIZE;
1725 sp->rx_skbuff[entry] = sp->rx_skbuff[forw_entry];
1726 sp->rx_skbuff[forw_entry] = NULL;
1727 rxf = sp->rx_ringp[forw_entry];
1728 sp->rx_ringp[forw_entry] = NULL;
1729 sp->rx_ringp[entry] = rxf;
1731 } else {
1732 rxf = sp->rx_ringp[entry];
1734 speedo_rx_link(dev, entry, rxf, sp->rx_ring_dma[entry]);
1735 sp->dirty_rx++;
1736 sp->rx_ring_state &= ~(RrNoMem|RrOOMReported); /* Mark the progress. */
1737 return 0;
1740 static void speedo_refill_rx_buffers(struct net_device *dev, int force)
1742 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1744 /* Refill the RX ring. */
1745 while ((int)(sp->cur_rx - sp->dirty_rx) > 0 &&
1746 speedo_refill_rx_buf(dev, force) != -1)
1747 ; /* empty body: refill until caught up or allocation fails */
1749 static int
1750 speedo_rx(struct net_device *dev)
1752 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1753 int entry = sp->cur_rx % RX_RING_SIZE;
1754 int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;
1755 int alloc_ok = 1;
1756 int npkts = 0;
1758 if (netif_msg_intr(sp))
1759 printk(KERN_DEBUG " In speedo_rx().\n");
1760 /* If we own the next entry, it's a new packet. Send it up. */
1761 while (sp->rx_ringp[entry] != NULL) {
1762 int status;
1763 int pkt_len;
1765 pci_dma_sync_single(sp->pdev, sp->rx_ring_dma[entry],
1766 sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1767 status = le32_to_cpu(sp->rx_ringp[entry]->status);
1768 pkt_len = le32_to_cpu(sp->rx_ringp[entry]->count) & 0x3fff;
1770 if (!(status & RxComplete))
1771 break;
1773 if (--rx_work_limit < 0)
1774 break;
1776 /* Check for a rare out-of-memory case: the current buffer is
1777 the last buffer allocated in the RX ring. --SAW */
1778 if (sp->last_rxf == sp->rx_ringp[entry]) {
1779 /* Postpone the packet. It'll be reaped at an interrupt when this
1780 packet is no longer the last packet in the ring. */
1781 if (netif_msg_rx_err(sp))
1782 printk(KERN_DEBUG "%s: RX packet postponed!\n",
1783 dev->name);
1784 sp->rx_ring_state |= RrPostponed;
1785 break;
1788 if (netif_msg_rx_status(sp))
1789 printk(KERN_DEBUG " speedo_rx() status %8.8x len %d.\n", status,
1790 pkt_len);
1791 if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) {
1792 if (status & RxErrTooBig)
1793 printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, "
1794 "status %8.8x!\n", dev->name, status);
1795 else if (! (status & RxOK)) {
1796 /* There was a fatal error. This *should* be impossible. */
1797 sp->stats.rx_errors++;
1798 printk(KERN_ERR "%s: Anomalous event in speedo_rx(), "
1799 "status %8.8x.\n",
1800 dev->name, status);
1802 } else {
1803 struct sk_buff *skb;
1805 /* Check if the packet is long enough to just accept without
1806 copying to a properly sized skbuff. */
1807 if (pkt_len < rx_copybreak
1808 && (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
1809 skb->dev = dev;
1810 skb_reserve(skb, 2); /* Align the IP header on a word boundary */
1811 /* 'skb_put()' points to the start of sk_buff data area. */
1812 pci_dma_sync_single(sp->pdev, sp->rx_ring_dma[entry],
1813 sizeof(struct RxFD) + pkt_len, PCI_DMA_FROMDEVICE);
1815 #if 1 || USE_IP_CSUM
1816 /* Packet is in one chunk -- we can copy + cksum. */
1817 eth_copy_and_sum(skb, sp->rx_skbuff[entry]->tail, pkt_len, 0);
1818 skb_put(skb, pkt_len);
1819 #else
1820 memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->tail,
1821 pkt_len);
1822 #endif
1823 npkts++;
1824 } else {
1825 /* Pass up the already-filled skbuff. */
1826 skb = sp->rx_skbuff[entry];
1827 if (skb == NULL) {
1828 printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
1829 dev->name);
1830 break;
1832 sp->rx_skbuff[entry] = NULL;
1833 skb_put(skb, pkt_len);
1834 npkts++;
1835 sp->rx_ringp[entry] = NULL;
1836 pci_unmap_single(sp->pdev, sp->rx_ring_dma[entry],
1837 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1839 skb->protocol = eth_type_trans(skb, dev);
1840 netif_rx(skb);
1841 dev->last_rx = jiffies;
1842 sp->stats.rx_packets++;
1843 sp->stats.rx_bytes += pkt_len;
1845 entry = (++sp->cur_rx) % RX_RING_SIZE;
1846 sp->rx_ring_state &= ~RrPostponed;
1847 /* Refill the recently taken buffers.
1848 Do it one-by-one to handle traffic bursts better. */
1849 if (alloc_ok && speedo_refill_rx_buf(dev, 0) == -1)
1850 alloc_ok = 0;
1853 /* Try hard to refill the recently taken buffers. */
1854 speedo_refill_rx_buffers(dev, 1);
1856 if (npkts)
1857 sp->last_rx_time = jiffies;
1859 return 0;
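/* Editorial note: the rx_copybreak test in speedo_rx() above trades one
   memcpy against recycling the large mapped ring buffer. In outline: */
#if 0
	if (pkt_len < rx_copybreak) {
		/* small frame: copy into a fresh pkt_len-sized skb; the ring
		   skb and its DMA mapping stay in place for reuse */
	} else {
		/* large frame: unmap the ring skb and pass it upstream whole;
		   the slot is refilled with a new allocation afterwards */
	}
#endif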
1862 static int
1863 speedo_close(struct net_device *dev)
1865 long ioaddr = dev->base_addr;
1866 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1867 int i;
1869 netdevice_stop(dev);
1870 netif_stop_queue(dev);
1872 if (netif_msg_ifdown(sp))
1873 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
1874 dev->name, inw(ioaddr + SCBStatus));
1876 /* Shut off the media monitoring timer. */
1877 del_timer_sync(&sp->timer);
1879 outw(SCBMaskAll, ioaddr + SCBCmd);
1881 /* Shutting down the chip nicely fails to disable flow control. So... */
1882 outl(PortPartialReset, ioaddr + SCBPort);
1883 inl(ioaddr + SCBPort); /* flush posted write */
1884 /*
1885 * The chip requires a 10 microsecond quiet period. Wait here!
1886 */
1887 udelay(10);
1889 free_irq(dev->irq, dev);
1890 speedo_show_state(dev);
1892 /* Free all the skbuffs in the Rx and Tx queues. */
1893 for (i = 0; i < RX_RING_SIZE; i++) {
1894 struct sk_buff *skb = sp->rx_skbuff[i];
1895 sp->rx_skbuff[i] = 0;
1896 /* Clear the Rx descriptors. */
1897 if (skb) {
1898 pci_unmap_single(sp->pdev,
1899 sp->rx_ring_dma[i],
1900 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1901 dev_kfree_skb(skb);
1905 for (i = 0; i < TX_RING_SIZE; i++) {
1906 struct sk_buff *skb = sp->tx_skbuff[i];
1907 sp->tx_skbuff[i] = 0;
1908 /* Clear the Tx descriptors. */
1909 if (skb) {
1910 pci_unmap_single(sp->pdev,
1911 le32_to_cpu(sp->tx_ring[i].tx_buf_addr0),
1912 skb->len, PCI_DMA_TODEVICE);
1913 dev_kfree_skb(skb);
1917 /* Free multicast setting blocks. */
1918 for (i = 0; sp->mc_setup_head != NULL; i++) {
1919 struct speedo_mc_block *t;
1920 t = sp->mc_setup_head->next;
1921 kfree(sp->mc_setup_head);
1922 sp->mc_setup_head = t;
1924 sp->mc_setup_tail = NULL;
1925 if (netif_msg_ifdown(sp))
1926 printk(KERN_DEBUG "%s: %d multicast blocks dropped.\n", dev->name, i);
1928 pci_set_power_state(sp->pdev, 2);
1930 return 0;
1933 /* The Speedo-3 has an especially awkward and unusable method of getting
1934 statistics out of the chip. It takes an unpredictable length of time
1935 for the dump-stats command to complete. To avoid a busy-wait loop we
1936 update the stats with the previous dump results, and then trigger a
1937 new dump.
1939 Oh, and incoming frames are dropped while executing dump-stats!
1940 */
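/* Editorial note: a sketch of the deferred-dump pattern that
   speedo_get_stats() below implements. fold_lstats() is a hypothetical
   helper standing in for the block of "+=" updates; 0xA007 is the
   completion marker the chip writes when a dump finishes: */
#if 0
	if (sp->lstats->done_marker == cpu_to_le32(0xA007)) {
		fold_lstats(&sp->stats, sp->lstats);	/* previous dump results */
		sp->lstats->done_marker = 0;		/* re-arm the marker */
		outb(CUDumpStats, ioaddr + SCBCmd);	/* completes asynchronously */
	}
	return &sp->stats;	/* may lag the hardware by one dump cycle */
#endif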
1941 static struct net_device_stats *
1942 speedo_get_stats(struct net_device *dev)
1944 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1945 long ioaddr = dev->base_addr;
1947 /* Update only if the previous dump finished. */
1948 if (sp->lstats->done_marker == cpu_to_le32(0xA007)) { /* marker is le32 in memory */
1949 sp->stats.tx_aborted_errors += le32_to_cpu(sp->lstats->tx_coll16_errs);
1950 sp->stats.tx_window_errors += le32_to_cpu(sp->lstats->tx_late_colls);
1951 sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_underruns);
1952 sp->stats.tx_carrier_errors += le32_to_cpu(sp->lstats->tx_lost_carrier);
1953 /*sp->stats.tx_deferred += le32_to_cpu(sp->lstats->tx_deferred);*/
1954 sp->stats.collisions += le32_to_cpu(sp->lstats->tx_total_colls);
1955 sp->stats.rx_crc_errors += le32_to_cpu(sp->lstats->rx_crc_errs);
1956 sp->stats.rx_frame_errors += le32_to_cpu(sp->lstats->rx_align_errs);
1957 sp->stats.rx_over_errors += le32_to_cpu(sp->lstats->rx_resource_errs);
1958 sp->stats.rx_fifo_errors += le32_to_cpu(sp->lstats->rx_overrun_errs);
1959 sp->stats.rx_length_errors += le32_to_cpu(sp->lstats->rx_runt_errs);
1960 sp->lstats->done_marker = 0x0000;
1961 if (netif_running(dev)) {
1962 unsigned long flags;
1963 /* Take a spinlock to make wait_for_cmd_done and sending the
1964 command atomic. --SAW */
1965 spin_lock_irqsave(&sp->lock, flags);
1966 wait_for_cmd_done(dev);
1967 outb(CUDumpStats, ioaddr + SCBCmd);
1968 spin_unlock_irqrestore(&sp->lock, flags);
1971 return &sp->stats;
1974 static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
1976 u32 ethcmd;
1977 struct speedo_private *sp = dev->priv;
1979 if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
1980 return -EFAULT;
1982 switch (ethcmd) {
1983 /* get driver-specific version/etc. info */
1984 case ETHTOOL_GDRVINFO: {
1985 struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
1986 strncpy(info.driver, "eepro100", sizeof(info.driver)-1);
1987 strncpy(info.version, version, sizeof(info.version)-1);
1988 if (sp && sp->pdev)
1989 strcpy(info.bus_info, sp->pdev->slot_name);
1990 if (copy_to_user(useraddr, &info, sizeof(info)))
1991 return -EFAULT;
1992 return 0;
1995 /* get settings */
1996 case ETHTOOL_GSET: {
1997 struct ethtool_cmd ecmd = { ETHTOOL_GSET };
1998 spin_lock_irq(&sp->lock);
1999 mii_ethtool_gset(&sp->mii_if, &ecmd);
2000 spin_unlock_irq(&sp->lock);
2001 if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
2002 return -EFAULT;
2003 return 0;
2005 /* set settings */
2006 case ETHTOOL_SSET: {
2007 int r;
2008 struct ethtool_cmd ecmd;
2009 if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
2010 return -EFAULT;
2011 spin_lock_irq(&sp->lock);
2012 r = mii_ethtool_sset(&sp->mii_if, &ecmd);
2013 spin_unlock_irq(&sp->lock);
2014 return r;
2016 /* restart autonegotiation */
2017 case ETHTOOL_NWAY_RST: {
2018 return mii_nway_restart(&sp->mii_if);
2020 /* get link status */
2021 case ETHTOOL_GLINK: {
2022 struct ethtool_value edata = {ETHTOOL_GLINK};
2023 edata.data = mii_link_ok(&sp->mii_if);
2024 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2025 return -EFAULT;
2026 return 0;
2028 /* get message-level */
2029 case ETHTOOL_GMSGLVL: {
2030 struct ethtool_value edata = {ETHTOOL_GMSGLVL};
2031 edata.data = sp->msg_enable;
2032 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2033 return -EFAULT;
2034 return 0;
2036 /* set message-level */
2037 case ETHTOOL_SMSGLVL: {
2038 struct ethtool_value edata;
2039 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2040 return -EFAULT;
2041 sp->msg_enable = edata.data;
2042 return 0;
2047 return -EOPNOTSUPP;
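/* Editorial note: a user-space sketch of exercising the ETHTOOL_GLINK
   case above through SIOCETHTOOL (standard ethtool ioctl plumbing;
   headers and error handling elided, "eth0" is an example name): */
#if 0
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct ethtool_value edata = { ETHTOOL_GLINK };
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strcpy(ifr.ifr_name, "eth0");
	ifr.ifr_data = (caddr_t)&edata;
	ioctl(fd, SIOCETHTOOL, &ifr);
	/* edata.data is nonzero iff mii_link_ok() reported link up */
#endif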
2050 static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2052 struct speedo_private *sp = (struct speedo_private *)dev->priv;
2053 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&rq->ifr_data;
2054 int phy = sp->phy[0] & 0x1f;
2055 int saved_acpi;
2056 int t;
2058 switch(cmd) {
2059 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
2060 data->phy_id = phy;
2061 /* Fall through. */
2062 case SIOCGMIIREG: /* Read MII PHY register. */
2063 /* FIXME: these operations need to be serialized with MDIO
2064 access from the timeout handler.
2065 They are currently serialized only with MDIO access from the
2066 timer routine. 2000/05/09 SAW */
2067 saved_acpi = pci_set_power_state(sp->pdev, 0);
2068 t = del_timer_sync(&sp->timer);
2069 data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
2070 if (t)
2071 add_timer(&sp->timer); /* may be set to the past --SAW */
2072 pci_set_power_state(sp->pdev, saved_acpi);
2073 return 0;
2075 case SIOCSMIIREG: /* Write MII PHY register. */
2076 if (!capable(CAP_NET_ADMIN))
2077 return -EPERM;
2078 saved_acpi = pci_set_power_state(sp->pdev, 0);
2079 t = del_timer_sync(&sp->timer);
2080 mdio_write(dev, data->phy_id, data->reg_num, data->val_in);
2081 if (t)
2082 add_timer(&sp->timer); /* may be set to the past --SAW */
2083 pci_set_power_state(sp->pdev, saved_acpi);
2084 return 0;
2085 case SIOCETHTOOL:
2086 return netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
2087 default:
2088 return -EOPNOTSUPP;
2092 /* Set or clear the multicast filter for this adaptor.
2093 This is very ugly with Intel chips -- we usually have to execute an
2094 entire configuration command, plus process a multicast command.
2095 This is complicated. We must put a large configuration command and
2096 an arbitrarily-sized multicast command in the transmit list.
2097 To minimize the disruption -- the previous command might have already
2098 loaded the link -- we convert the current command block, normally a Tx
2099 command, into a no-op and link it to the new command.
2100 */
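/* Editorial note: the no-op conversion described above, condensed from
   the large-multicast branch of set_rx_mode() below (names from this
   file): */
#if 0
	entry = sp->cur_tx++ % TX_RING_SIZE;		/* claim a ring slot */
	sp->tx_ring[entry].status = cpu_to_le32(CmdNOp);
	sp->tx_ring[entry].link = cpu_to_le32(mc_blk->frame_dma);
	mc_setup_frm->link =
		cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
	clear_suspend(last_cmd);			/* CU runs through the no-op, */
	outb(CUResume, ioaddr + SCBCmd);		/* out to the frame, and back */
#endif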
2101 static void set_rx_mode(struct net_device *dev)
2103 struct speedo_private *sp = (struct speedo_private *)dev->priv;
2104 long ioaddr = dev->base_addr;
2105 struct descriptor *last_cmd;
2106 char new_rx_mode;
2107 unsigned long flags;
2108 int entry, i;
2110 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
2111 new_rx_mode = 3;
2112 } else if ((dev->flags & IFF_ALLMULTI) ||
2113 dev->mc_count > multicast_filter_limit) {
2114 new_rx_mode = 1;
2115 } else
2116 new_rx_mode = 0;
2118 if (netif_msg_rx_status(sp))
2119 printk(KERN_DEBUG "%s: set_rx_mode %d -> %d\n", dev->name,
2120 sp->rx_mode, new_rx_mode);
2122 if ((int)(sp->cur_tx - sp->dirty_tx) > TX_RING_SIZE - TX_MULTICAST_SIZE) {
2123 /* The Tx ring is full -- don't add anything! Hope the mode will be
2124 * set again later. */
2125 sp->rx_mode = -1;
2126 return;
2129 if (new_rx_mode != sp->rx_mode) {
2130 u8 *config_cmd_data;
2132 spin_lock_irqsave(&sp->lock, flags);
2133 entry = sp->cur_tx++ % TX_RING_SIZE;
2134 last_cmd = sp->last_cmd;
2135 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
2137 sp->tx_skbuff[entry] = 0; /* Redundant. */
2138 sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdConfigure);
2139 sp->tx_ring[entry].link =
2140 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2141 config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr;
2142 /* Construct a full CmdConfig frame. */
2143 memcpy(config_cmd_data, i82558_config_cmd, CONFIG_DATA_SIZE);
2144 config_cmd_data[1] = (txfifo << 4) | rxfifo;
2145 config_cmd_data[4] = rxdmacount;
2146 config_cmd_data[5] = txdmacount + 0x80;
2147 config_cmd_data[15] |= (new_rx_mode & 2) ? 1 : 0;
2148 /* 0x80 doesn't disable FC; 0x84 does.
2149 Disable flow control, since we are not ACK-ing any FC interrupts
2150 for now. --Dragan */
2151 config_cmd_data[19] = 0x84;
2152 config_cmd_data[19] |= sp->mii_if.full_duplex ? 0x40 : 0;
2153 config_cmd_data[21] = (new_rx_mode & 1) ? 0x0D : 0x05;
2154 if (sp->phy[0] & 0x8000) { /* Use the AUI port instead. */
2155 config_cmd_data[15] |= 0x80;
2156 config_cmd_data[8] = 0;
2158 /* Trigger the command unit resume. */
2159 wait_for_cmd_done(dev);
2160 clear_suspend(last_cmd);
2161 outb(CUResume, ioaddr + SCBCmd);
2162 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2163 netif_stop_queue(dev);
2164 sp->tx_full = 1;
2166 spin_unlock_irqrestore(&sp->lock, flags);
2169 if (new_rx_mode == 0 && dev->mc_count < 4) {
2170 /* The simple case of 0-3 multicast list entries occurs often, and
2171 fits within one tx_ring[] entry. */
2172 struct dev_mc_list *mclist;
2173 u16 *setup_params, *eaddrs;
2175 spin_lock_irqsave(&sp->lock, flags);
2176 entry = sp->cur_tx++ % TX_RING_SIZE;
2177 last_cmd = sp->last_cmd;
2178 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
2180 sp->tx_skbuff[entry] = 0;
2181 sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdMulticastList);
2182 sp->tx_ring[entry].link =
2183 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2184 sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */
2185 setup_params = (u16 *)&sp->tx_ring[entry].tx_desc_addr;
2186 *setup_params++ = cpu_to_le16(dev->mc_count*6);
2187 /* Fill in the multicast addresses. */
2188 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
2189 i++, mclist = mclist->next) {
2190 eaddrs = (u16 *)mclist->dmi_addr;
2191 *setup_params++ = *eaddrs++;
2192 *setup_params++ = *eaddrs++;
2193 *setup_params++ = *eaddrs++;
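/* Editorial note: parameter-area layout just built above, assuming the
   i8255x multicast setup command format: a byte count followed by packed
   6-byte addresses with no padding, which is why the loop advances
   setup_params in three u16 strides per address. For two addresses: */
#if 0
	u16 count = 2 * 6;	/* 12 bytes of addresses follow */
	u8 addrs[12];		/* addr0[0..5] then addr1[0..5], back to back */
#endif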
2196 wait_for_cmd_done(dev);
2197 clear_suspend(last_cmd);
2198 /* Immediately trigger the command unit resume. */
2199 outb(CUResume, ioaddr + SCBCmd);
2201 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2202 netif_stop_queue(dev);
2203 sp->tx_full = 1;
2205 spin_unlock_irqrestore(&sp->lock, flags);
2206 } else if (new_rx_mode == 0) {
2207 struct dev_mc_list *mclist;
2208 u16 *setup_params, *eaddrs;
2209 struct speedo_mc_block *mc_blk;
2210 struct descriptor *mc_setup_frm;
2211 int i;
2213 mc_blk = kmalloc(sizeof(*mc_blk) + 2 + multicast_filter_limit*6,
2214 GFP_ATOMIC);
2215 if (mc_blk == NULL) {
2216 printk(KERN_ERR "%s: Failed to allocate a setup frame.\n",
2217 dev->name);
2218 sp->rx_mode = -1; /* We failed, try again. */
2219 return;
2221 mc_blk->next = NULL;
2222 mc_blk->len = 2 + multicast_filter_limit*6;
2223 mc_blk->frame_dma =
2224 pci_map_single(sp->pdev, &mc_blk->frame, mc_blk->len,
2225 PCI_DMA_TODEVICE);
2226 mc_setup_frm = &mc_blk->frame;
2228 /* Fill the setup frame. */
2229 if (netif_msg_ifup(sp))
2230 printk(KERN_DEBUG "%s: Constructing a setup frame at %p.\n",
2231 dev->name, mc_setup_frm);
2232 mc_setup_frm->cmd_status =
2233 cpu_to_le32(CmdSuspend | CmdIntr | CmdMulticastList);
2234 /* Link set below. */
2235 setup_params = (u16 *)&mc_setup_frm->params;
2236 *setup_params++ = cpu_to_le16(dev->mc_count*6);
2237 /* Fill in the multicast addresses. */
2238 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
2239 i++, mclist = mclist->next) {
2240 eaddrs = (u16 *)mclist->dmi_addr;
2241 *setup_params++ = *eaddrs++;
2242 *setup_params++ = *eaddrs++;
2243 *setup_params++ = *eaddrs++;
2246 /* Disable interrupts while playing with the Tx Cmd list. */
2247 spin_lock_irqsave(&sp->lock, flags);
2249 if (sp->mc_setup_tail)
2250 sp->mc_setup_tail->next = mc_blk;
2251 else
2252 sp->mc_setup_head = mc_blk;
2253 sp->mc_setup_tail = mc_blk;
2254 mc_blk->tx = sp->cur_tx;
2256 entry = sp->cur_tx++ % TX_RING_SIZE;
2257 last_cmd = sp->last_cmd;
2258 sp->last_cmd = mc_setup_frm;
2260 /* Change the command to a NoOp, pointing to the CmdMulti command. */
2261 sp->tx_skbuff[entry] = 0;
2262 sp->tx_ring[entry].status = cpu_to_le32(CmdNOp);
2263 sp->tx_ring[entry].link = cpu_to_le32(mc_blk->frame_dma);
2265 /* Set the link in the setup frame. */
2266 mc_setup_frm->link =
2267 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2269 pci_dma_sync_single(sp->pdev, mc_blk->frame_dma,
2270 mc_blk->len, PCI_DMA_TODEVICE);
2272 wait_for_cmd_done(dev);
2273 clear_suspend(last_cmd);
2274 /* Immediately trigger the command unit resume. */
2275 outb(CUResume, ioaddr + SCBCmd);
2277 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2278 netif_stop_queue(dev);
2279 sp->tx_full = 1;
2281 spin_unlock_irqrestore(&sp->lock, flags);
2283 if (netif_msg_rx_status(sp))
2284 printk(KERN_DEBUG "%s: CmdMCSetup with %d multicast addresses in entry %d.\n",
2285 dev->name, dev->mc_count, entry);
2288 sp->rx_mode = new_rx_mode;
2291 #ifdef CONFIG_PM
2292 static int eepro100_suspend(struct pci_dev *pdev, u32 state)
2294 struct net_device *dev = pci_get_drvdata (pdev);
2295 struct speedo_private *sp = (struct speedo_private *)dev->priv;
2296 long ioaddr = dev->base_addr;
2298 pci_save_state(pdev, sp->pm_state);
2300 if (!netif_running(dev))
2301 return 0;
2303 del_timer_sync(&sp->timer);
2305 netif_device_detach(dev);
2306 outl(PortPartialReset, ioaddr + SCBPort);
2308 /* XXX call pci_set_power_state ()? */
2309 return 0;
2312 static int eepro100_resume(struct pci_dev *pdev)
2314 struct net_device *dev = pci_get_drvdata (pdev);
2315 struct speedo_private *sp = (struct speedo_private *)dev->priv;
2316 long ioaddr = dev->base_addr;
2318 pci_restore_state(pdev, sp->pm_state);
2320 if (!netif_running(dev))
2321 return 0;
2323 /* I'm not at all certain that this part of the code works.
2324 The problem areas are:
2325 - correct hardware reinitialization;
2326 - correct driver behavior between different steps of the
2327 reinitialization;
2328 - serialization with other driver calls.
2329 2000/03/08 SAW */
2330 outw(SCBMaskAll, ioaddr + SCBCmd);
2331 speedo_resume(dev);
2332 netif_device_attach(dev);
2333 sp->rx_mode = -1;
2334 sp->flow_ctrl = sp->partner = 0;
2335 set_rx_mode(dev);
2336 sp->timer.expires = RUN_AT(2*HZ);
2337 add_timer(&sp->timer);
2338 return 0;
2340 #endif /* CONFIG_PM */
2342 static void __devexit eepro100_remove_one (struct pci_dev *pdev)
2344 struct net_device *dev = pci_get_drvdata (pdev);
2345 struct speedo_private *sp = (struct speedo_private *)dev->priv;
2347 unregister_netdev(dev);
2349 release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
2350 release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
2352 #ifndef USE_IO
2353 iounmap((char *)dev->base_addr);
2354 #endif
2356 pci_free_consistent(pdev, TX_RING_SIZE * sizeof(struct TxFD)
2357 + sizeof(struct speedo_stats),
2358 sp->tx_ring, sp->tx_ring_dma);
2359 pci_disable_device(pdev);
2360 kfree(dev);
2363 static struct pci_device_id eepro100_pci_tbl[] __devinitdata = {
2364 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82557,
2365 PCI_ANY_ID, PCI_ANY_ID, },
2366 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82559ER,
2367 PCI_ANY_ID, PCI_ANY_ID, },
2368 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_7,
2369 PCI_ANY_ID, PCI_ANY_ID, },
2370 { PCI_VENDOR_ID_INTEL, 0x1029, PCI_ANY_ID, PCI_ANY_ID, },
2371 { PCI_VENDOR_ID_INTEL, 0x1030, PCI_ANY_ID, PCI_ANY_ID, },
2372 { PCI_VENDOR_ID_INTEL, 0x1031, PCI_ANY_ID, PCI_ANY_ID, },
2373 { PCI_VENDOR_ID_INTEL, 0x1032, PCI_ANY_ID, PCI_ANY_ID, },
2374 { PCI_VENDOR_ID_INTEL, 0x1033, PCI_ANY_ID, PCI_ANY_ID, },
2375 { PCI_VENDOR_ID_INTEL, 0x1034, PCI_ANY_ID, PCI_ANY_ID, },
2376 { PCI_VENDOR_ID_INTEL, 0x1035, PCI_ANY_ID, PCI_ANY_ID, },
2377 { PCI_VENDOR_ID_INTEL, 0x1036, PCI_ANY_ID, PCI_ANY_ID, },
2378 { PCI_VENDOR_ID_INTEL, 0x1037, PCI_ANY_ID, PCI_ANY_ID, },
2379 { PCI_VENDOR_ID_INTEL, 0x1038, PCI_ANY_ID, PCI_ANY_ID, },
2380 { PCI_VENDOR_ID_INTEL, 0x1039, PCI_ANY_ID, PCI_ANY_ID, },
2381 { PCI_VENDOR_ID_INTEL, 0x103A, PCI_ANY_ID, PCI_ANY_ID, },
2382 { PCI_VENDOR_ID_INTEL, 0x103B, PCI_ANY_ID, PCI_ANY_ID, },
2383 { PCI_VENDOR_ID_INTEL, 0x103C, PCI_ANY_ID, PCI_ANY_ID, },
2384 { PCI_VENDOR_ID_INTEL, 0x103D, PCI_ANY_ID, PCI_ANY_ID, },
2385 { PCI_VENDOR_ID_INTEL, 0x103E, PCI_ANY_ID, PCI_ANY_ID, },
2386 { PCI_VENDOR_ID_INTEL, 0x1227, PCI_ANY_ID, PCI_ANY_ID, },
2387 { PCI_VENDOR_ID_INTEL, 0x1228, PCI_ANY_ID, PCI_ANY_ID, },
2388 { PCI_VENDOR_ID_INTEL, 0x2449, PCI_ANY_ID, PCI_ANY_ID, },
2389 { PCI_VENDOR_ID_INTEL, 0x2459, PCI_ANY_ID, PCI_ANY_ID, },
2390 { PCI_VENDOR_ID_INTEL, 0x245D, PCI_ANY_ID, PCI_ANY_ID, },
2391 { PCI_VENDOR_ID_INTEL, 0x5200, PCI_ANY_ID, PCI_ANY_ID, },
2392 { PCI_VENDOR_ID_INTEL, 0x5201, PCI_ANY_ID, PCI_ANY_ID, },
2393 { 0,}
2395 MODULE_DEVICE_TABLE(pci, eepro100_pci_tbl);
2397 static struct pci_driver eepro100_driver = {
2398 .name = "eepro100",
2399 .id_table = eepro100_pci_tbl,
2400 .probe = eepro100_init_one,
2401 .remove = __devexit_p(eepro100_remove_one),
2402 #ifdef CONFIG_PM
2403 .suspend = eepro100_suspend,
2404 .resume = eepro100_resume,
2405 #endif /* CONFIG_PM */
2408 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,48)
2409 static int pci_module_init(struct pci_driver *pdev)
2411 int rc;
2413 rc = pci_register_driver(pdev);
2414 if (rc <= 0) {
2415 printk(KERN_INFO "%s: No cards found, driver not installed.\n",
2416 pdev->name);
2417 pci_unregister_driver(pdev);
2418 return -ENODEV;
2420 return 0;
2422 #endif
2424 static int __init eepro100_init_module(void)
2426 #ifdef MODULE
2427 printk(version);
2428 #endif
2429 return pci_module_init(&eepro100_driver);
2432 static void __exit eepro100_cleanup_module(void)
2434 pci_unregister_driver(&eepro100_driver);
2437 module_init(eepro100_init_module);
2438 module_exit(eepro100_cleanup_module);
2440 /*
2441 * Local variables:
2442 * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
2443 * c-indent-level: 4
2444 * c-basic-offset: 4
2445 * tab-width: 4
2446 * End:
2447 */