/*
 * Provenance: linux-2.6 (linux-mips.git), drivers/net/lance.c
 * blob 56cc884228a816aeb589d5c60e4a99004238d944
 */
1 /* lance.c: An AMD LANCE/PCnet ethernet driver for Linux. */
2 /*
3 Written/copyright 1993-1998 by Donald Becker.
5 Copyright 1993 United States Government as represented by the
6 Director, National Security Agency.
7 This software may be used and distributed according to the terms
8 of the GNU General Public License, incorporated herein by reference.
10 This driver is for the Allied Telesis AT1500 and HP J2405A, and should work
11 with most other LANCE-based bus-master (NE2100/NE2500) ethercards.
13 The author may be reached as becker@scyld.com, or C/O
14 Scyld Computing Corporation
15 410 Severn Ave., Suite 210
16 Annapolis MD 21403
18 Andrey V. Savochkin:
19 - alignment problem with 1.3.* kernel and some minor changes.
20 Thomas Bogendoerfer (tsbogend@bigbug.franken.de):
21 - added support for Linux/Alpha, but removed most of it, because
22 it worked only for the PCI chip.
23 - added hook for the 32bit lance driver
24 - added PCnetPCI II (79C970A) to chip table
25 Paul Gortmaker (gpg109@rsphy1.anu.edu.au):
26 - hopefully fix above so Linux/Alpha can use ISA cards too.
27 8/20/96 Fixed 7990 autoIRQ failure and reversed unneeded alignment -djb
28 v1.12 10/27/97 Module support -djb
29 v1.14 2/3/98 Module support modified, made PCI support optional -djb
30 v1.15 5/27/99 Fixed bug in the cleanup_module(). dev->priv was freed
31 before unregister_netdev() which caused NULL pointer
32 reference later in the chain (in rtnetlink_fill_ifinfo())
33 -- Mika Kuoppala <miku@iki.fi>
35 Forward ported v1.14 to 2.1.129, merged the PCI and misc changes from
36 the 2.1 version of the old driver - Alan Cox
38 Get rid of check_region, check kmalloc return in lance_probe1
    Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 11/01/2001
*/
42 static const char version[] = "lance.c:v1.15ac 1999/11/13 dplatt@3do.com, becker@cesdis.gsfc.nasa.gov\n";
44 #include <linux/module.h>
45 #include <linux/kernel.h>
46 #include <linux/string.h>
47 #include <linux/errno.h>
48 #include <linux/ioport.h>
49 #include <linux/slab.h>
50 #include <linux/interrupt.h>
51 #include <linux/pci.h>
52 #include <linux/init.h>
53 #include <linux/netdevice.h>
54 #include <linux/etherdevice.h>
55 #include <linux/skbuff.h>
57 #include <asm/bitops.h>
58 #include <asm/io.h>
59 #include <asm/dma.h>
61 static unsigned int lance_portlist[] __initdata = { 0x300, 0x320, 0x340, 0x360, 0};
62 int lance_probe(struct net_device *dev);
63 static int lance_probe1(struct net_device *dev, int ioaddr, int irq, int options);
65 #ifdef LANCE_DEBUG
66 static int lance_debug = LANCE_DEBUG;
67 #else
68 static int lance_debug = 1;
69 #endif
/*
				Theory of Operation
74 I. Board Compatibility
76 This device driver is designed for the AMD 79C960, the "PCnet-ISA
77 single-chip ethernet controller for ISA". This chip is used in a wide
78 variety of boards from vendors such as Allied Telesis, HP, Kingston,
79 and Boca. This driver is also intended to work with older AMD 7990
80 designs, such as the NE1500 and NE2100, and newer 79C961. For convenience,
81 I use the name LANCE to refer to all of the AMD chips, even though it properly
82 refers only to the original 7990.
84 II. Board-specific settings
86 The driver is designed to work the boards that use the faster
87 bus-master mode, rather than in shared memory mode. (Only older designs
88 have on-board buffer memory needed to support the slower shared memory mode.)
90 Most ISA boards have jumpered settings for the I/O base, IRQ line, and DMA
91 channel. This driver probes the likely base addresses:
92 {0x300, 0x320, 0x340, 0x360}.
93 After the board is found it generates a DMA-timeout interrupt and uses
94 autoIRQ to find the IRQ line. The DMA channel can be set with the low bits
95 of the otherwise-unused dev->mem_start value (aka PARAM1). If unset it is
96 probed for by enabling each free DMA channel in turn and checking if
97 initialization succeeds.
99 The HP-J2405A board is an exception: with this board it is easy to read the
100 EEPROM-set values for the base, IRQ, and DMA. (Of course you must already
101 _know_ the base address -- that field is for writing the EEPROM.)
103 III. Driver operation
105 IIIa. Ring buffers
106 The LANCE uses ring buffers of Tx and Rx descriptors. Each entry describes
107 the base and length of the data buffer, along with status bits. The length
108 of these buffers is set by LANCE_LOG_{RX,TX}_BUFFERS, which is log_2() of
109 the buffer length (rather than being directly the buffer length) for
110 implementation ease. The current values are 2 (Tx) and 4 (Rx), which leads to
111 ring sizes of 4 (Tx) and 16 (Rx). Increasing the number of ring entries
112 needlessly uses extra space and reduces the chance that an upper layer will
113 be able to reorder queued Tx packets based on priority. Decreasing the number
114 of entries makes it more difficult to achieve back-to-back packet transmission
115 and increases the chance that Rx ring will overflow. (Consider the worst case
116 of receiving back-to-back minimum-sized packets.)
118 The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver
119 statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to
120 avoid the administrative overhead. For the Rx side this avoids dynamically
121 allocating full-sized buffers "just in case", at the expense of a
122 memory-to-memory data copy for each packet received. For most systems this
123 is a good tradeoff: the Rx buffer will always be in low memory, the copy
124 is inexpensive, and it primes the cache for later packet processing. For Tx
125 the buffers are only used when needed as low-memory bounce buffers.
127 IIIB. 16M memory limitations.
128 For the ISA bus master mode all structures used directly by the LANCE,
129 the initialization block, Rx and Tx rings, and data buffers, must be
130 accessible from the ISA bus, i.e. in the lower 16M of real memory.
131 This is a problem for current Linux kernels on >16M machines. The network
132 devices are initialized after memory initialization, and the kernel doles out
133 memory from the top of memory downward. The current solution is to have a
134 special network initialization routine that's called before memory
135 initialization; this will eventually be generalized for all network devices.
136 As mentioned before, low-memory "bounce-buffers" are used when needed.
138 IIIC. Synchronization
139 The driver runs as two independent, single-threaded flows of control. One
140 is the send-packet routine, which enforces single-threaded use by the
141 dev->tbusy flag. The other thread is the interrupt handler, which is single
142 threaded by the hardware and other software.
144 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
145 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
146 queue slot is empty, it clears the tbusy flag when finished otherwise it sets
147 the 'lp->tx_full' flag.
149 The interrupt handler has exclusive control over the Rx ring and records stats
150 from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
151 we can't avoid the interrupt overhead by having the Tx routine reap the Tx
152 stats.) After reaping the stats, it marks the queue entry as empty by setting
153 the 'base' to zero. Iff the 'lp->tx_full' flag is set, it clears both the
tx_full and tbusy flags.
*/
158 /* Set the number of Tx and Rx buffers, using Log_2(# buffers).
159 Reasonable default values are 16 Tx buffers, and 16 Rx buffers.
160 That translates to 4 and 4 (16 == 2^^4).
161 This is a compile-time option for efficiency.
163 #ifndef LANCE_LOG_TX_BUFFERS
164 #define LANCE_LOG_TX_BUFFERS 4
165 #define LANCE_LOG_RX_BUFFERS 4
166 #endif
168 #define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
169 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
170 #define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
172 #define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS))
173 #define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
174 #define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
176 #define PKT_BUF_SZ 1544
178 /* Offsets from base I/O address. */
179 #define LANCE_DATA 0x10
180 #define LANCE_ADDR 0x12
181 #define LANCE_RESET 0x14
182 #define LANCE_BUS_IF 0x16
183 #define LANCE_TOTAL_SIZE 0x18
185 #define TX_TIMEOUT 20
187 /* The LANCE Rx and Tx ring descriptors. */
188 struct lance_rx_head {
189 s32 base;
190 s16 buf_length; /* This length is 2s complement (negative)! */
191 s16 msg_length; /* This length is "normal". */
194 struct lance_tx_head {
195 s32 base;
196 s16 length; /* Length is 2s complement (negative)! */
197 s16 misc;
200 /* The LANCE initialization block, described in databook. */
201 struct lance_init_block {
202 u16 mode; /* Pre-set mode (reg. 15) */
203 u8 phys_addr[6]; /* Physical ethernet address */
204 u32 filter[2]; /* Multicast filter (unused). */
205 /* Receive and transmit ring base, along with extra bits. */
206 u32 rx_ring; /* Tx and Rx ring base pointers */
207 u32 tx_ring;
210 struct lance_private {
211 /* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
212 struct lance_rx_head rx_ring[RX_RING_SIZE];
213 struct lance_tx_head tx_ring[TX_RING_SIZE];
214 struct lance_init_block init_block;
215 const char *name;
216 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
217 struct sk_buff* tx_skbuff[TX_RING_SIZE];
218 /* The addresses of receive-in-place skbuffs. */
219 struct sk_buff* rx_skbuff[RX_RING_SIZE];
220 unsigned long rx_buffs; /* Address of Rx and Tx buffers. */
221 /* Tx low-memory "bounce buffer" address. */
222 char (*tx_bounce_buffs)[PKT_BUF_SZ];
223 int cur_rx, cur_tx; /* The next free ring entry */
224 int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
225 int dma;
226 struct net_device_stats stats;
227 unsigned char chip_version; /* See lance_chip_type. */
228 spinlock_t devlock;
231 #define LANCE_MUST_PAD 0x00000001
232 #define LANCE_ENABLE_AUTOSELECT 0x00000002
233 #define LANCE_MUST_REINIT_RING 0x00000004
234 #define LANCE_MUST_UNRESET 0x00000008
235 #define LANCE_HAS_MISSED_FRAME 0x00000010
237 /* A mapping from the chip ID number to the part number and features.
238 These are from the datasheets -- in real life the '970 version
239 reportedly has the same ID as the '965. */
240 static struct lance_chip_type {
241 int id_number;
242 const char *name;
243 int flags;
244 } chip_table[] = {
245 {0x0000, "LANCE 7990", /* Ancient lance chip. */
246 LANCE_MUST_PAD + LANCE_MUST_UNRESET},
247 {0x0003, "PCnet/ISA 79C960", /* 79C960 PCnet/ISA. */
248 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
249 LANCE_HAS_MISSED_FRAME},
250 {0x2260, "PCnet/ISA+ 79C961", /* 79C961 PCnet/ISA+, Plug-n-Play. */
251 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
252 LANCE_HAS_MISSED_FRAME},
253 {0x2420, "PCnet/PCI 79C970", /* 79C970 or 79C974 PCnet-SCSI, PCI. */
254 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
255 LANCE_HAS_MISSED_FRAME},
256 /* Bug: the PCnet/PCI actually uses the PCnet/VLB ID number, so just call
257 it the PCnet32. */
258 {0x2430, "PCnet32", /* 79C965 PCnet for VL bus. */
259 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
260 LANCE_HAS_MISSED_FRAME},
261 {0x2621, "PCnet/PCI-II 79C970A", /* 79C970A PCInetPCI II. */
262 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
263 LANCE_HAS_MISSED_FRAME},
264 {0x0, "PCnet (unknown)",
265 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
266 LANCE_HAS_MISSED_FRAME},
269 enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, PCNET_PCI_II=5, LANCE_UNKNOWN=6};
272 /* Non-zero if lance_probe1() needs to allocate low-memory bounce buffers.
273 Assume yes until we know the memory size. */
274 static unsigned char lance_need_isa_bounce_buffers = 1;
276 static int lance_open(struct net_device *dev);
277 static int lance_open_fail(struct net_device *dev);
278 static void lance_init_ring(struct net_device *dev, int mode);
279 static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev);
280 static int lance_rx(struct net_device *dev);
281 static irqreturn_t lance_interrupt(int irq, void *dev_id, struct pt_regs *regs);
282 static int lance_close(struct net_device *dev);
283 static struct net_device_stats *lance_get_stats(struct net_device *dev);
284 static void set_multicast_list(struct net_device *dev);
285 static void lance_tx_timeout (struct net_device *dev);
289 #ifdef MODULE
290 #define MAX_CARDS 8 /* Max number of interfaces (cards) per module */
292 static struct net_device dev_lance[MAX_CARDS];
293 static int io[MAX_CARDS];
294 static int dma[MAX_CARDS];
295 static int irq[MAX_CARDS];
297 MODULE_PARM(io, "1-" __MODULE_STRING(MAX_CARDS) "i");
298 MODULE_PARM(dma, "1-" __MODULE_STRING(MAX_CARDS) "i");
299 MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_CARDS) "i");
300 MODULE_PARM(lance_debug, "i");
301 MODULE_PARM_DESC(io, "LANCE/PCnet I/O base address(es),required");
302 MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)");
303 MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)");
304 MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)");
306 int init_module(void)
308 int this_dev, found = 0;
310 for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
311 struct net_device *dev = &dev_lance[this_dev];
312 dev->irq = irq[this_dev];
313 dev->base_addr = io[this_dev];
314 dev->dma = dma[this_dev];
315 dev->init = lance_probe;
316 if (io[this_dev] == 0) {
317 if (this_dev != 0) break; /* only complain once */
318 printk(KERN_NOTICE "lance.c: Module autoprobing not allowed. Append \"io=0xNNN\" value(s).\n");
319 return -EPERM;
321 if (register_netdev(dev) != 0) {
322 printk(KERN_WARNING "lance.c: No PCnet/LANCE card found (i/o = 0x%x).\n", io[this_dev]);
323 if (found != 0) return 0; /* Got at least one. */
324 return -ENXIO;
326 found++;
329 return 0;
332 void cleanup_module(void)
334 int this_dev;
336 for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
337 struct net_device *dev = &dev_lance[this_dev];
338 if (dev->priv != NULL) {
339 unregister_netdev(dev);
340 free_dma(dev->dma);
341 release_region(dev->base_addr, LANCE_TOTAL_SIZE);
342 kfree(dev->priv);
343 dev->priv = NULL;
347 #endif /* MODULE */
348 MODULE_LICENSE("GPL");
351 /* Starting in v2.1.*, the LANCE/PCnet probe is now similar to the other
352 board probes now that kmalloc() can allocate ISA DMA-able regions.
   This also allows the LANCE driver to be used as a module.
*/
355 int __init lance_probe(struct net_device *dev)
357 int *port, result;
359 if (high_memory <= phys_to_virt(16*1024*1024))
360 lance_need_isa_bounce_buffers = 0;
362 for (port = lance_portlist; *port; port++) {
363 int ioaddr = *port;
364 struct resource *r = request_region(ioaddr, LANCE_TOTAL_SIZE,
365 "lance-probe");
367 if (r) {
368 /* Detect "normal" 0x57 0x57 and the NI6510EB 0x52 0x44
369 signatures w/ minimal I/O reads */
370 char offset15, offset14 = inb(ioaddr + 14);
372 if ((offset14 == 0x52 || offset14 == 0x57) &&
373 ((offset15 = inb(ioaddr + 15)) == 0x57 ||
374 offset15 == 0x44)) {
375 result = lance_probe1(dev, ioaddr, 0, 0);
376 if (!result) {
377 struct lance_private *lp = dev->priv;
378 int ver = lp->chip_version;
380 r->name = chip_table[ver].name;
381 return 0;
384 release_region(ioaddr, LANCE_TOTAL_SIZE);
387 return -ENODEV;
390 static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int options)
392 struct lance_private *lp;
393 long dma_channels; /* Mark spuriously-busy DMA channels */
394 int i, reset_val, lance_version;
395 const char *chipname;
396 /* Flags for specific chips or boards. */
397 unsigned char hpJ2405A = 0; /* HP ISA adaptor */
398 int hp_builtin = 0; /* HP on-board ethernet. */
399 static int did_version; /* Already printed version info. */
400 unsigned long flags;
402 /* First we look for special cases.
403 Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
404 There are two HP versions, check the BIOS for the configuration port.
405 This method provided by L. Julliard, Laurent_Julliard@grenoble.hp.com.
407 if (isa_readw(0x000f0102) == 0x5048) {
408 static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360};
409 int hp_port = (isa_readl(0x000f00f1) & 1) ? 0x499 : 0x99;
410 /* We can have boards other than the built-in! Verify this is on-board. */
411 if ((inb(hp_port) & 0xc0) == 0x80
412 && ioaddr_table[inb(hp_port) & 3] == ioaddr)
413 hp_builtin = hp_port;
415 /* We also recognize the HP Vectra on-board here, but check below. */
416 hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00
417 && inb(ioaddr+2) == 0x09);
419 /* Reset the LANCE. */
420 reset_val = inw(ioaddr+LANCE_RESET); /* Reset the LANCE */
422 /* The Un-Reset needed is only needed for the real NE2100, and will
423 confuse the HP board. */
424 if (!hpJ2405A)
425 outw(reset_val, ioaddr+LANCE_RESET);
427 outw(0x0000, ioaddr+LANCE_ADDR); /* Switch to window 0 */
428 if (inw(ioaddr+LANCE_DATA) != 0x0004)
429 return -ENODEV;
431 /* Get the version of the chip. */
432 outw(88, ioaddr+LANCE_ADDR);
433 if (inw(ioaddr+LANCE_ADDR) != 88) {
434 lance_version = 0;
435 } else { /* Good, it's a newer chip. */
436 int chip_version = inw(ioaddr+LANCE_DATA);
437 outw(89, ioaddr+LANCE_ADDR);
438 chip_version |= inw(ioaddr+LANCE_DATA) << 16;
439 if (lance_debug > 2)
440 printk(" LANCE chip version is %#x.\n", chip_version);
441 if ((chip_version & 0xfff) != 0x003)
442 return -ENODEV;
443 chip_version = (chip_version >> 12) & 0xffff;
444 for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) {
445 if (chip_table[lance_version].id_number == chip_version)
446 break;
450 /* We can't use init_etherdev() to allocate dev->priv because it must
451 a ISA DMA-able region. */
452 dev = init_etherdev(dev, 0);
453 if (!dev)
454 return -ENOMEM;
455 SET_MODULE_OWNER(dev);
456 dev->open = lance_open_fail;
457 chipname = chip_table[lance_version].name;
458 printk("%s: %s at %#3x,", dev->name, chipname, ioaddr);
460 /* There is a 16 byte station address PROM at the base address.
461 The first six bytes are the station address. */
462 for (i = 0; i < 6; i++)
463 printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i));
465 dev->base_addr = ioaddr;
466 /* Make certain the data structures used by the LANCE are aligned and DMAble. */
468 lp = (struct lance_private *)(((unsigned long)kmalloc(sizeof(*lp)+7,
469 GFP_DMA | GFP_KERNEL)+7) & ~7);
470 if(lp==NULL)
471 return -ENODEV;
472 if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
473 memset(lp, 0, sizeof(*lp));
474 dev->priv = lp;
475 lp->name = chipname;
476 lp->rx_buffs = (unsigned long)kmalloc(PKT_BUF_SZ*RX_RING_SIZE,
477 GFP_DMA | GFP_KERNEL);
478 if (!lp->rx_buffs)
479 goto out_lp;
480 if (lance_need_isa_bounce_buffers) {
481 lp->tx_bounce_buffs = kmalloc(PKT_BUF_SZ*TX_RING_SIZE,
482 GFP_DMA | GFP_KERNEL);
483 if (!lp->tx_bounce_buffs)
484 goto out_rx;
485 } else
486 lp->tx_bounce_buffs = NULL;
488 lp->chip_version = lance_version;
489 lp->devlock = SPIN_LOCK_UNLOCKED;
491 lp->init_block.mode = 0x0003; /* Disable Rx and Tx. */
492 for (i = 0; i < 6; i++)
493 lp->init_block.phys_addr[i] = dev->dev_addr[i];
494 lp->init_block.filter[0] = 0x00000000;
495 lp->init_block.filter[1] = 0x00000000;
496 lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
497 lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
499 outw(0x0001, ioaddr+LANCE_ADDR);
500 inw(ioaddr+LANCE_ADDR);
501 outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
502 outw(0x0002, ioaddr+LANCE_ADDR);
503 inw(ioaddr+LANCE_ADDR);
504 outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
505 outw(0x0000, ioaddr+LANCE_ADDR);
506 inw(ioaddr+LANCE_ADDR);
508 if (irq) { /* Set iff PCI card. */
509 dev->dma = 4; /* Native bus-master, no DMA channel needed. */
510 dev->irq = irq;
511 } else if (hp_builtin) {
512 static const char dma_tbl[4] = {3, 5, 6, 0};
513 static const char irq_tbl[4] = {3, 4, 5, 9};
514 unsigned char port_val = inb(hp_builtin);
515 dev->dma = dma_tbl[(port_val >> 4) & 3];
516 dev->irq = irq_tbl[(port_val >> 2) & 3];
517 printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma);
518 } else if (hpJ2405A) {
519 static const char dma_tbl[4] = {3, 5, 6, 7};
520 static const char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
521 short reset_val = inw(ioaddr+LANCE_RESET);
522 dev->dma = dma_tbl[(reset_val >> 2) & 3];
523 dev->irq = irq_tbl[(reset_val >> 4) & 7];
524 printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
525 } else if (lance_version == PCNET_ISAP) { /* The plug-n-play version. */
526 short bus_info;
527 outw(8, ioaddr+LANCE_ADDR);
528 bus_info = inw(ioaddr+LANCE_BUS_IF);
529 dev->dma = bus_info & 0x07;
530 dev->irq = (bus_info >> 4) & 0x0F;
531 } else {
532 /* The DMA channel may be passed in PARAM1. */
533 if (dev->mem_start & 0x07)
534 dev->dma = dev->mem_start & 0x07;
537 if (dev->dma == 0) {
538 /* Read the DMA channel status register, so that we can avoid
539 stuck DMA channels in the DMA detection below. */
540 dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
541 (inb(DMA2_STAT_REG) & 0xf0);
543 if (dev->irq >= 2)
544 printk(" assigned IRQ %d", dev->irq);
545 else if (lance_version != 0) { /* 7990 boards need DMA detection first. */
546 unsigned long irq_mask, delay;
548 /* To auto-IRQ we enable the initialization-done and DMA error
549 interrupts. For ISA boards we get a DMA error, but VLB and PCI
550 boards will work. */
551 irq_mask = probe_irq_on();
553 /* Trigger an initialization just for the interrupt. */
554 outw(0x0041, ioaddr+LANCE_DATA);
556 delay = jiffies + HZ/50;
557 while (time_before(jiffies, delay)) ;
558 dev->irq = probe_irq_off(irq_mask);
559 if (dev->irq)
560 printk(", probed IRQ %d", dev->irq);
561 else {
562 printk(", failed to detect IRQ line.\n");
563 return -ENODEV;
566 /* Check for the initialization done bit, 0x0100, which means
567 that we don't need a DMA channel. */
568 if (inw(ioaddr+LANCE_DATA) & 0x0100)
569 dev->dma = 4;
572 if (dev->dma == 4) {
573 printk(", no DMA needed.\n");
574 } else if (dev->dma) {
575 if (request_dma(dev->dma, chipname)) {
576 printk("DMA %d allocation failed.\n", dev->dma);
577 return -ENODEV;
578 } else
579 printk(", assigned DMA %d.\n", dev->dma);
580 } else { /* OK, we have to auto-DMA. */
581 for (i = 0; i < 4; i++) {
582 static const char dmas[] = { 5, 6, 7, 3 };
583 int dma = dmas[i];
584 int boguscnt;
586 /* Don't enable a permanently busy DMA channel, or the machine
587 will hang. */
588 if (test_bit(dma, &dma_channels))
589 continue;
590 outw(0x7f04, ioaddr+LANCE_DATA); /* Clear the memory error bits. */
591 if (request_dma(dma, chipname))
592 continue;
594 flags=claim_dma_lock();
595 set_dma_mode(dma, DMA_MODE_CASCADE);
596 enable_dma(dma);
597 release_dma_lock(flags);
599 /* Trigger an initialization. */
600 outw(0x0001, ioaddr+LANCE_DATA);
601 for (boguscnt = 100; boguscnt > 0; --boguscnt)
602 if (inw(ioaddr+LANCE_DATA) & 0x0900)
603 break;
604 if (inw(ioaddr+LANCE_DATA) & 0x0100) {
605 dev->dma = dma;
606 printk(", DMA %d.\n", dev->dma);
607 break;
608 } else {
609 flags=claim_dma_lock();
610 disable_dma(dma);
611 release_dma_lock(flags);
612 free_dma(dma);
615 if (i == 4) { /* Failure: bail. */
616 printk("DMA detection failed.\n");
617 return -ENODEV;
621 if (lance_version == 0 && dev->irq == 0) {
622 /* We may auto-IRQ now that we have a DMA channel. */
623 /* Trigger an initialization just for the interrupt. */
624 unsigned long irq_mask, delay;
626 irq_mask = probe_irq_on();
627 outw(0x0041, ioaddr+LANCE_DATA);
629 delay = jiffies + HZ/25;
630 while (time_before(jiffies, delay)) ;
631 dev->irq = probe_irq_off(irq_mask);
632 if (dev->irq == 0) {
633 printk(" Failed to detect the 7990 IRQ line.\n");
634 return -ENODEV;
636 printk(" Auto-IRQ detected IRQ%d.\n", dev->irq);
639 if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
640 /* Turn on auto-select of media (10baseT or BNC) so that the user
641 can watch the LEDs even if the board isn't opened. */
642 outw(0x0002, ioaddr+LANCE_ADDR);
643 /* Don't touch 10base2 power bit. */
644 outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
647 if (lance_debug > 0 && did_version++ == 0)
648 printk(version);
650 /* The LANCE-specific entries in the device structure. */
651 dev->open = lance_open;
652 dev->hard_start_xmit = lance_start_xmit;
653 dev->stop = lance_close;
654 dev->get_stats = lance_get_stats;
655 dev->set_multicast_list = set_multicast_list;
656 dev->tx_timeout = lance_tx_timeout;
657 dev->watchdog_timeo = TX_TIMEOUT;
659 return 0;
660 out_rx: kfree((void*)lp->rx_buffs);
661 out_lp: kfree(lp);
662 return -ENOMEM;
665 static int
666 lance_open_fail(struct net_device *dev)
668 return -ENODEV;
673 static int
674 lance_open(struct net_device *dev)
676 struct lance_private *lp = dev->priv;
677 int ioaddr = dev->base_addr;
678 int i;
680 if (dev->irq == 0 ||
681 request_irq(dev->irq, &lance_interrupt, 0, lp->name, dev)) {
682 return -EAGAIN;
685 /* We used to allocate DMA here, but that was silly.
686 DMA lines can't be shared! We now permanently allocate them. */
688 /* Reset the LANCE */
689 inw(ioaddr+LANCE_RESET);
691 /* The DMA controller is used as a no-operation slave, "cascade mode". */
692 if (dev->dma != 4) {
693 unsigned long flags=claim_dma_lock();
694 enable_dma(dev->dma);
695 set_dma_mode(dev->dma, DMA_MODE_CASCADE);
696 release_dma_lock(flags);
699 /* Un-Reset the LANCE, needed only for the NE2100. */
700 if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET)
701 outw(0, ioaddr+LANCE_RESET);
703 if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
704 /* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */
705 outw(0x0002, ioaddr+LANCE_ADDR);
706 /* Only touch autoselect bit. */
707 outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
710 if (lance_debug > 1)
711 printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
712 dev->name, dev->irq, dev->dma,
713 (u32) isa_virt_to_bus(lp->tx_ring),
714 (u32) isa_virt_to_bus(lp->rx_ring),
715 (u32) isa_virt_to_bus(&lp->init_block));
717 lance_init_ring(dev, GFP_KERNEL);
718 /* Re-initialize the LANCE, and start it when done. */
719 outw(0x0001, ioaddr+LANCE_ADDR);
720 outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
721 outw(0x0002, ioaddr+LANCE_ADDR);
722 outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
724 outw(0x0004, ioaddr+LANCE_ADDR);
725 outw(0x0915, ioaddr+LANCE_DATA);
727 outw(0x0000, ioaddr+LANCE_ADDR);
728 outw(0x0001, ioaddr+LANCE_DATA);
730 netif_start_queue (dev);
732 i = 0;
733 while (i++ < 100)
734 if (inw(ioaddr+LANCE_DATA) & 0x0100)
735 break;
737 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
738 * reports that doing so triggers a bug in the '974.
740 outw(0x0042, ioaddr+LANCE_DATA);
742 if (lance_debug > 2)
743 printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
744 dev->name, i, (u32) isa_virt_to_bus(&lp->init_block), inw(ioaddr+LANCE_DATA));
746 return 0; /* Always succeed */
749 /* The LANCE has been halted for one reason or another (busmaster memory
750 arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
751 etc.). Modern LANCE variants always reload their ring-buffer
752 configuration when restarted, so we must reinitialize our ring
753 context before restarting. As part of this reinitialization,
754 find all packets still on the Tx ring and pretend that they had been
755 sent (in effect, drop the packets on the floor) - the higher-level
756 protocols will time out and retransmit. It'd be better to shuffle
757 these skbs to a temp list and then actually re-Tx them after
   restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
*/
761 static void
762 lance_purge_ring(struct net_device *dev)
764 struct lance_private *lp = dev->priv;
765 int i;
767 /* Free all the skbuffs in the Rx and Tx queues. */
768 for (i = 0; i < RX_RING_SIZE; i++) {
769 struct sk_buff *skb = lp->rx_skbuff[i];
770 lp->rx_skbuff[i] = 0;
771 lp->rx_ring[i].base = 0; /* Not owned by LANCE chip. */
772 if (skb)
773 dev_kfree_skb_any(skb);
775 for (i = 0; i < TX_RING_SIZE; i++) {
776 if (lp->tx_skbuff[i]) {
777 dev_kfree_skb_any(lp->tx_skbuff[i]);
778 lp->tx_skbuff[i] = NULL;
784 /* Initialize the LANCE Rx and Tx rings. */
785 static void
786 lance_init_ring(struct net_device *dev, int gfp)
788 struct lance_private *lp = dev->priv;
789 int i;
791 lp->cur_rx = lp->cur_tx = 0;
792 lp->dirty_rx = lp->dirty_tx = 0;
794 for (i = 0; i < RX_RING_SIZE; i++) {
795 struct sk_buff *skb;
796 void *rx_buff;
798 skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp);
799 lp->rx_skbuff[i] = skb;
800 if (skb) {
801 skb->dev = dev;
802 rx_buff = skb->tail;
803 } else
804 rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
805 if (rx_buff == NULL)
806 lp->rx_ring[i].base = 0;
807 else
808 lp->rx_ring[i].base = (u32)isa_virt_to_bus(rx_buff) | 0x80000000;
809 lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
811 /* The Tx buffer address is filled in as needed, but we do need to clear
812 the upper ownership bit. */
813 for (i = 0; i < TX_RING_SIZE; i++) {
814 lp->tx_skbuff[i] = 0;
815 lp->tx_ring[i].base = 0;
818 lp->init_block.mode = 0x0000;
819 for (i = 0; i < 6; i++)
820 lp->init_block.phys_addr[i] = dev->dev_addr[i];
821 lp->init_block.filter[0] = 0x00000000;
822 lp->init_block.filter[1] = 0x00000000;
823 lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
824 lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
827 static void
828 lance_restart(struct net_device *dev, unsigned int csr0_bits, int must_reinit)
830 struct lance_private *lp = dev->priv;
832 if (must_reinit ||
833 (chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) {
834 lance_purge_ring(dev);
835 lance_init_ring(dev, GFP_ATOMIC);
837 outw(0x0000, dev->base_addr + LANCE_ADDR);
838 outw(csr0_bits, dev->base_addr + LANCE_DATA);
842 static void lance_tx_timeout (struct net_device *dev)
844 struct lance_private *lp = (struct lance_private *) dev->priv;
845 int ioaddr = dev->base_addr;
847 outw (0, ioaddr + LANCE_ADDR);
848 printk ("%s: transmit timed out, status %4.4x, resetting.\n",
849 dev->name, inw (ioaddr + LANCE_DATA));
850 outw (0x0004, ioaddr + LANCE_DATA);
851 lp->stats.tx_errors++;
852 #ifndef final_version
853 if (lance_debug > 3) {
854 int i;
855 printk (" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
856 lp->dirty_tx, lp->cur_tx, netif_queue_stopped(dev) ? " (full)" : "",
857 lp->cur_rx);
858 for (i = 0; i < RX_RING_SIZE; i++)
859 printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
860 lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
861 lp->rx_ring[i].msg_length);
862 for (i = 0; i < TX_RING_SIZE; i++)
863 printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
864 lp->tx_ring[i].base, -lp->tx_ring[i].length,
865 lp->tx_ring[i].misc);
866 printk ("\n");
868 #endif
869 lance_restart (dev, 0x0043, 1);
871 dev->trans_start = jiffies;
872 netif_wake_queue (dev);
876 static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
878 struct lance_private *lp = dev->priv;
879 int ioaddr = dev->base_addr;
880 int entry;
881 unsigned long flags;
883 spin_lock_irqsave(&lp->devlock, flags);
885 if (lance_debug > 3) {
886 outw(0x0000, ioaddr+LANCE_ADDR);
887 printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
888 inw(ioaddr+LANCE_DATA));
889 outw(0x0000, ioaddr+LANCE_DATA);
892 /* Fill in a Tx ring entry */
894 /* Mask to ring buffer boundary. */
895 entry = lp->cur_tx & TX_RING_MOD_MASK;
897 /* Caution: the write order is important here, set the base address
898 with the "ownership" bits last. */
900 /* The old LANCE chips doesn't automatically pad buffers to min. size. */
901 if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) {
902 if (skb->len < ETH_ZLEN) {
903 skb = skb_padto(skb, ETH_ZLEN);
904 if (skb == NULL)
905 goto out;
906 lp->tx_ring[entry].length = -ETH_ZLEN;
908 else
909 lp->tx_ring[entry].length = -skb->len;
910 } else
911 lp->tx_ring[entry].length = -skb->len;
913 lp->tx_ring[entry].misc = 0x0000;
915 lp->stats.tx_bytes += skb->len;
917 /* If any part of this buffer is >16M we must copy it to a low-memory
918 buffer. */
919 if ((u32)isa_virt_to_bus(skb->data) + skb->len > 0x01000000) {
920 if (lance_debug > 5)
921 printk("%s: bouncing a high-memory packet (%#x).\n",
922 dev->name, (u32)isa_virt_to_bus(skb->data));
923 memcpy(&lp->tx_bounce_buffs[entry], skb->data, skb->len);
924 lp->tx_ring[entry].base =
925 ((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
926 dev_kfree_skb(skb);
927 } else {
928 lp->tx_skbuff[entry] = skb;
929 lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
931 lp->cur_tx++;
933 /* Trigger an immediate send poll. */
934 outw(0x0000, ioaddr+LANCE_ADDR);
935 outw(0x0048, ioaddr+LANCE_DATA);
937 dev->trans_start = jiffies;
939 if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE)
940 netif_stop_queue(dev);
942 out:
943 spin_unlock_irqrestore(&lp->devlock, flags);
944 return 0;
947 /* The LANCE interrupt handler. */
948 static irqreturn_t
949 lance_interrupt(int irq, void *dev_id, struct pt_regs * regs)
951 struct net_device *dev = dev_id;
952 struct lance_private *lp;
953 int csr0, ioaddr, boguscnt=10;
954 int must_restart;
956 if (dev == NULL) {
957 printk ("lance_interrupt(): irq %d for unknown device.\n", irq);
958 return IRQ_NONE;
961 ioaddr = dev->base_addr;
962 lp = dev->priv;
964 spin_lock (&lp->devlock);
966 outw(0x00, dev->base_addr + LANCE_ADDR);
967 while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600
968 && --boguscnt >= 0) {
969 /* Acknowledge all of the current interrupt sources ASAP. */
970 outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);
972 must_restart = 0;
974 if (lance_debug > 5)
975 printk("%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
976 dev->name, csr0, inw(dev->base_addr + LANCE_DATA));
978 if (csr0 & 0x0400) /* Rx interrupt */
979 lance_rx(dev);
981 if (csr0 & 0x0200) { /* Tx-done interrupt */
982 int dirty_tx = lp->dirty_tx;
984 while (dirty_tx < lp->cur_tx) {
985 int entry = dirty_tx & TX_RING_MOD_MASK;
986 int status = lp->tx_ring[entry].base;
988 if (status < 0)
989 break; /* It still hasn't been Txed */
991 lp->tx_ring[entry].base = 0;
993 if (status & 0x40000000) {
994 /* There was an major error, log it. */
995 int err_status = lp->tx_ring[entry].misc;
996 lp->stats.tx_errors++;
997 if (err_status & 0x0400) lp->stats.tx_aborted_errors++;
998 if (err_status & 0x0800) lp->stats.tx_carrier_errors++;
999 if (err_status & 0x1000) lp->stats.tx_window_errors++;
1000 if (err_status & 0x4000) {
1001 /* Ackk! On FIFO errors the Tx unit is turned off! */
1002 lp->stats.tx_fifo_errors++;
1003 /* Remove this verbosity later! */
1004 printk("%s: Tx FIFO error! Status %4.4x.\n",
1005 dev->name, csr0);
1006 /* Restart the chip. */
1007 must_restart = 1;
1009 } else {
1010 if (status & 0x18000000)
1011 lp->stats.collisions++;
1012 lp->stats.tx_packets++;
1015 /* We must free the original skb if it's not a data-only copy
1016 in the bounce buffer. */
1017 if (lp->tx_skbuff[entry]) {
1018 dev_kfree_skb_irq(lp->tx_skbuff[entry]);
1019 lp->tx_skbuff[entry] = 0;
1021 dirty_tx++;
1024 #ifndef final_version
1025 if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
1026 printk("out-of-sync dirty pointer, %d vs. %d, full=%s.\n",
1027 dirty_tx, lp->cur_tx,
1028 netif_queue_stopped(dev) ? "yes" : "no");
1029 dirty_tx += TX_RING_SIZE;
1031 #endif
1033 /* if the ring is no longer full, accept more packets */
1034 if (netif_queue_stopped(dev) &&
1035 dirty_tx > lp->cur_tx - TX_RING_SIZE + 2)
1036 netif_wake_queue (dev);
1038 lp->dirty_tx = dirty_tx;
1041 /* Log misc errors. */
1042 if (csr0 & 0x4000) lp->stats.tx_errors++; /* Tx babble. */
1043 if (csr0 & 0x1000) lp->stats.rx_errors++; /* Missed a Rx frame. */
1044 if (csr0 & 0x0800) {
1045 printk("%s: Bus master arbitration failure, status %4.4x.\n",
1046 dev->name, csr0);
1047 /* Restart the chip. */
1048 must_restart = 1;
1051 if (must_restart) {
1052 /* stop the chip to clear the error condition, then restart */
1053 outw(0x0000, dev->base_addr + LANCE_ADDR);
1054 outw(0x0004, dev->base_addr + LANCE_DATA);
1055 lance_restart(dev, 0x0002, 0);
1059 /* Clear any other interrupt, and set interrupt enable. */
1060 outw(0x0000, dev->base_addr + LANCE_ADDR);
1061 outw(0x7940, dev->base_addr + LANCE_DATA);
1063 if (lance_debug > 4)
1064 printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
1065 dev->name, inw(ioaddr + LANCE_ADDR),
1066 inw(dev->base_addr + LANCE_DATA));
1068 spin_unlock (&lp->devlock);
1069 return IRQ_HANDLED;
1072 static int
1073 lance_rx(struct net_device *dev)
1075 struct lance_private *lp = dev->priv;
1076 int entry = lp->cur_rx & RX_RING_MOD_MASK;
1077 int i;
1079 /* If we own the next entry, it's a new packet. Send it up. */
1080 while (lp->rx_ring[entry].base >= 0) {
1081 int status = lp->rx_ring[entry].base >> 24;
1083 if (status != 0x03) { /* There was an error. */
1084 /* There is a tricky error noted by John Murphy,
1085 <murf@perftech.com> to Russ Nelson: Even with full-sized
1086 buffers it's possible for a jabber packet to use two
1087 buffers, with only the last correctly noting the error. */
1088 if (status & 0x01) /* Only count a general error at the */
1089 lp->stats.rx_errors++; /* end of a packet.*/
1090 if (status & 0x20) lp->stats.rx_frame_errors++;
1091 if (status & 0x10) lp->stats.rx_over_errors++;
1092 if (status & 0x08) lp->stats.rx_crc_errors++;
1093 if (status & 0x04) lp->stats.rx_fifo_errors++;
1094 lp->rx_ring[entry].base &= 0x03ffffff;
1096 else
1098 /* Malloc up new buffer, compatible with net3. */
1099 short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4;
1100 struct sk_buff *skb;
1102 if(pkt_len<60)
1104 printk("%s: Runt packet!\n",dev->name);
1105 lp->stats.rx_errors++;
1107 else
1109 skb = dev_alloc_skb(pkt_len+2);
1110 if (skb == NULL)
1112 printk("%s: Memory squeeze, deferring packet.\n", dev->name);
1113 for (i=0; i < RX_RING_SIZE; i++)
1114 if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
1115 break;
1117 if (i > RX_RING_SIZE -2)
1119 lp->stats.rx_dropped++;
1120 lp->rx_ring[entry].base |= 0x80000000;
1121 lp->cur_rx++;
1123 break;
1125 skb->dev = dev;
1126 skb_reserve(skb,2); /* 16 byte align */
1127 skb_put(skb,pkt_len); /* Make room */
1128 eth_copy_and_sum(skb,
1129 (unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)),
1130 pkt_len,0);
1131 skb->protocol=eth_type_trans(skb,dev);
1132 netif_rx(skb);
1133 dev->last_rx = jiffies;
1134 lp->stats.rx_packets++;
1135 lp->stats.rx_bytes+=pkt_len;
1138 /* The docs say that the buffer length isn't touched, but Andrew Boyd
1139 of QNX reports that some revs of the 79C965 clear it. */
1140 lp->rx_ring[entry].buf_length = -PKT_BUF_SZ;
1141 lp->rx_ring[entry].base |= 0x80000000;
1142 entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
1145 /* We should check that at least two ring entries are free. If not,
1146 we should free one and mark stats->rx_dropped++. */
1148 return 0;
1151 static int
1152 lance_close(struct net_device *dev)
1154 int ioaddr = dev->base_addr;
1155 struct lance_private *lp = dev->priv;
1157 netif_stop_queue (dev);
1159 if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
1160 outw(112, ioaddr+LANCE_ADDR);
1161 lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
1163 outw(0, ioaddr+LANCE_ADDR);
1165 if (lance_debug > 1)
1166 printk("%s: Shutting down ethercard, status was %2.2x.\n",
1167 dev->name, inw(ioaddr+LANCE_DATA));
1169 /* We stop the LANCE here -- it occasionally polls
1170 memory if we don't. */
1171 outw(0x0004, ioaddr+LANCE_DATA);
1173 if (dev->dma != 4)
1175 unsigned long flags=claim_dma_lock();
1176 disable_dma(dev->dma);
1177 release_dma_lock(flags);
1179 free_irq(dev->irq, dev);
1181 lance_purge_ring(dev);
1183 return 0;
1186 static struct net_device_stats *lance_get_stats(struct net_device *dev)
1188 struct lance_private *lp = dev->priv;
1190 if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
1191 short ioaddr = dev->base_addr;
1192 short saved_addr;
1193 unsigned long flags;
1195 spin_lock_irqsave(&lp->devlock, flags);
1196 saved_addr = inw(ioaddr+LANCE_ADDR);
1197 outw(112, ioaddr+LANCE_ADDR);
1198 lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
1199 outw(saved_addr, ioaddr+LANCE_ADDR);
1200 spin_unlock_irqrestore(&lp->devlock, flags);
1203 return &lp->stats;
/* Set or clear the multicast filter for this adaptor. */
1209 static void set_multicast_list(struct net_device *dev)
1211 short ioaddr = dev->base_addr;
1213 outw(0, ioaddr+LANCE_ADDR);
1214 outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance. */
1216 if (dev->flags&IFF_PROMISC) {
1217 /* Log any net taps. */
1218 printk("%s: Promiscuous mode enabled.\n", dev->name);
1219 outw(15, ioaddr+LANCE_ADDR);
1220 outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */
1221 } else {
1222 short multicast_table[4];
1223 int i;
1224 int num_addrs=dev->mc_count;
1225 if(dev->flags&IFF_ALLMULTI)
1226 num_addrs=1;
1227 /* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */
1228 memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
1229 for (i = 0; i < 4; i++) {
1230 outw(8 + i, ioaddr+LANCE_ADDR);
1231 outw(multicast_table[i], ioaddr+LANCE_DATA);
1233 outw(15, ioaddr+LANCE_ADDR);
1234 outw(0x0000, ioaddr+LANCE_DATA); /* Unset promiscuous mode */
1237 lance_restart(dev, 0x0142, 0); /* Resume normal operation */