Import 2.3.18pre1
[davej-history.git] / drivers / net / lance.c
1 /* lance.c: An AMD LANCE/PCnet ethernet driver for Linux. */
2 /*
3 Written/copyright 1993-1998 by Donald Becker.
5 Copyright 1993 United States Government as represented by the
6 Director, National Security Agency.
7 This software may be used and distributed according to the terms
8 of the GNU Public License, incorporated herein by reference.
10 This driver is for the Allied Telesis AT1500 and HP J2405A, and should work
11 with most other LANCE-based bus-master (NE2100/NE2500) ethercards.
13 The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
14 Center of Excellence in Space Data and Information Sciences
15 Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
17 Fixing alignment problem with 1.3.* kernel and some minor changes
18 by Andrey V. Savochkin, 1996.
20 Problems or questions may be sent to Donald Becker (see above) or to
21 Andrey Savochkin -- saw@shade.msu.ru or
22 Laboratory of Computation Methods,
23 Department of Mathematics and Mechanics,
24 Moscow State University,
25 Leninskye Gory, Moscow 119899
27 But I should tell you that I'm not an expert on the LANCE card,
28 and it may happen that you receive no answer to your mail
29 to Donald Becker. I never received an answer to any of my letters
30 to him. Who knows why... But maybe you will be luckier? ;->
31 SAW
33 Thomas Bogendoerfer (tsbogend@bigbug.franken.de):
34 - added support for Linux/Alpha, but removed most of it, because
35 it worked only for the PCI chip.
36 - added hook for the 32bit lance driver
37 - added PCnetPCI II (79C970A) to chip table
38 Paul Gortmaker (gpg109@rsphy1.anu.edu.au):
39 - hopefully fix above so Linux/Alpha can use ISA cards too.
40 8/20/96 Fixed 7990 autoIRQ failure and reversed unneeded alignment -djb
41 v1.12 10/27/97 Module support -djb
42 v1.14 2/3/98 Module support modified, made PCI support optional -djb
44 Forward ported v1.14 to 2.1.129, merged the PCI and misc changes from
45 the 2.1 version of the old driver - Alan Cox
48 static const char *version = "lance.c:v1.14ac 1998/11/20 dplatt@3do.com, becker@cesdis.gsfc.nasa.gov\n";
50 #include <linux/config.h>
51 #include <linux/module.h>
52 #include <linux/kernel.h>
53 #include <linux/sched.h>
54 #include <linux/string.h>
55 #include <linux/ptrace.h>
56 #include <linux/errno.h>
57 #include <linux/ioport.h>
58 #include <linux/malloc.h>
59 #include <linux/interrupt.h>
60 #include <linux/pci.h>
61 #include <linux/init.h>
62 #include <asm/bitops.h>
63 #include <asm/io.h>
64 #include <asm/dma.h>
66 #include <linux/netdevice.h>
67 #include <linux/etherdevice.h>
68 #include <linux/skbuff.h>
70 static unsigned int lance_portlist[] __initdata = { 0x300, 0x320, 0x340, 0x360, 0};
71 int lance_probe(struct net_device *dev);
72 int lance_probe1(struct net_device *dev, int ioaddr, int irq, int options);
74 #ifdef LANCE_DEBUG
75 int lance_debug = LANCE_DEBUG;
76 #else
77 int lance_debug = 1;
78 #endif
80 /*
81 Theory of Operation
83 I. Board Compatibility
85 This device driver is designed for the AMD 79C960, the "PCnet-ISA
86 single-chip ethernet controller for ISA". This chip is used in a wide
87 variety of boards from vendors such as Allied Telesis, HP, Kingston,
88 and Boca. This driver is also intended to work with older AMD 7990
89 designs, such as the NE1500 and NE2100, and newer 79C961. For convenience,
90 I use the name LANCE to refer to all of the AMD chips, even though it properly
91 refers only to the original 7990.
93 II. Board-specific settings
95 The driver is designed to work with boards that use the faster
96 bus-master mode, rather than the shared memory mode. (Only older designs
97 have on-board buffer memory needed to support the slower shared memory mode.)
99 Most ISA boards have jumpered settings for the I/O base, IRQ line, and DMA
100 channel. This driver probes the likely base addresses:
101 {0x300, 0x320, 0x340, 0x360}.
102 After the board is found it generates a DMA-timeout interrupt and uses
103 autoIRQ to find the IRQ line. The DMA channel can be set with the low bits
104 of the otherwise-unused dev->mem_start value (aka PARAM1). If unset it is
105 probed for by enabling each free DMA channel in turn and checking if
106 initialization succeeds.
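(In the probe code below this shows up as dev->dma = dev->mem_start & 0x07, so,
for example, passing 5 in PARAM1 selects DMA channel 5.)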
108 The HP-J2405A board is an exception: with this board it is easy to read the
109 EEPROM-set values for the base, IRQ, and DMA. (Of course you must already
110 _know_ the base address -- that field is for writing the EEPROM.)
112 III. Driver operation
114 IIIa. Ring buffers
115 The LANCE uses ring buffers of Tx and Rx descriptors. Each entry describes
116 the base and length of the data buffer, along with status bits. The length
117 of these buffers is set by LANCE_LOG_{RX,TX}_BUFFERS, which is log_2() of
118 the buffer length (rather than being directly the buffer length) for
119 implementation ease. The current values are 4 (Tx) and 4 (Rx), which leads to
120 ring sizes of 16 (Tx) and 16 (Rx). Increasing the number of ring entries
121 needlessly uses extra space and reduces the chance that an upper layer will
122 be able to reorder queued Tx packets based on priority. Decreasing the number
123 of entries makes it more difficult to achieve back-to-back packet transmission
124 and increases the chance that the Rx ring will overflow. (Consider the worst case
125 of receiving back-to-back minimum-sized packets.)
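As a rough worked example: a minimum-sized frame is 64 bytes (512 bits), or
51.2 usec on a 10 Mbps wire; with the 8-byte preamble and the 9.6 usec
inter-frame gap that is roughly 67 usec per frame, so a 16-entry Rx ring covers
only about 1 ms of back-to-back minimum-sized traffic.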
127 The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver
128 statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to
129 avoid the administrative overhead. For the Rx side this avoids dynamically
130 allocating full-sized buffers "just in case", at the expense of a
131 memory-to-memory data copy for each packet received. For most systems this
132 is a good tradeoff: the Rx buffer will always be in low memory, the copy
133 is inexpensive, and it primes the cache for later packet processing. For Tx
134 the buffers are only used when needed as low-memory bounce buffers.
136 IIIb. 16M memory limitations.
137 For the ISA bus master mode all structures used directly by the LANCE,
138 the initialization block, Rx and Tx rings, and data buffers, must be
139 accessible from the ISA bus, i.e. in the lower 16M of real memory.
140 This is a problem for current Linux kernels on >16M machines. The network
141 devices are initialized after memory initialization, and the kernel doles out
142 memory from the top of memory downward. The current solution is to have a
143 special network initialization routine that's called before memory
144 initialization; this will eventually be generalized for all network devices.
145 As mentioned before, low-memory "bounce-buffers" are used when needed.
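(The 16M limit is simply the reach of 24-bit ISA DMA addressing, 2^24 bytes;
it is also why the ring and init-block addresses below are masked with 0xffffff
before being handed to the chip.)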
147 IIIc. Synchronization
148 The driver runs as two independent, single-threaded flows of control. One
149 is the send-packet routine, which enforces single-threaded use by the
150 dev->tbusy flag. The other thread is the interrupt handler, which is single
151 threaded by the hardware and other software.
153 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
154 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
155 queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
156 the 'lp->tx_full' flag.
158 The interrupt handler has exclusive control over the Rx ring and records stats
159 from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
160 we can't avoid the interrupt overhead by having the Tx routine reap the Tx
161 stats.) After reaping the stats, it marks the queue entry as empty by setting
162 the 'base' to zero. Iff the 'lp->tx_full' flag is set, it clears both the
163 tx_full and tbusy flags.
165 */
167 /* Set the number of Tx and Rx buffers, using Log_2(# buffers).
168 Reasonable default values are 16 Tx buffers and 16 Rx buffers.
169 That translates to 4 and 4 (16 == 2^4).
170 This is a compile-time option for efficiency.
171 */
172 #ifndef LANCE_LOG_TX_BUFFERS
173 #define LANCE_LOG_TX_BUFFERS 4
174 #define LANCE_LOG_RX_BUFFERS 4
175 #endif
177 #define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
178 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
179 #define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
181 #define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS))
182 #define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
183 #define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
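/* With the default LANCE_LOG_{TX,RX}_BUFFERS of 4, these work out to
   TX_RING_SIZE == RX_RING_SIZE == 16, *_RING_MOD_MASK == 0x0f and
   *_RING_LEN_BITS == (4 << 29) == 0x80000000; the shift places the log2 ring
   length in the top bits of the init-block ring pointer words set up in
   lance_probe1() below, which is where the chip expects to find it. */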
185 #define PKT_BUF_SZ 1544
187 /* Offsets from base I/O address. */
188 #define LANCE_DATA 0x10
189 #define LANCE_ADDR 0x12
190 #define LANCE_RESET 0x14
191 #define LANCE_BUS_IF 0x16
192 #define LANCE_TOTAL_SIZE 0x18
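/* For reference, the CSR0 bits this driver uses (standard Am7990/PCnet layout):
   0x0001 INIT, 0x0002 STRT, 0x0004 STOP, 0x0008 TDMD (transmit demand),
   0x0040 INEA (interrupt enable), 0x0100 IDON (init done), 0x0200 TINT (Tx
   interrupt), 0x0400 RINT (Rx interrupt), 0x0800 MERR (bus-master/memory
   error), 0x1000 MISS (missed frame), 0x4000 BABL (Tx babble), 0x8000 ERR. */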
194 /* The LANCE Rx and Tx ring descriptors. */
195 struct lance_rx_head {
196 s32 base;
197 s16 buf_length; /* This length is 2s complement (negative)! */
198 s16 msg_length; /* This length is "normal". */
199 };
201 struct lance_tx_head {
202 s32 base;
203 s16 length; /* Length is 2s complement (negative)! */
204 s16 misc;
205 };
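/* Note that the chip takes buffer lengths in two's complement: the driver
   writes e.g. buf_length = -PKT_BUF_SZ, i.e. -1544, which the LANCE sees as
   0xf9f8. The high byte of 'base' carries the ownership and status bits. */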
207 /* The LANCE initialization block, described in databook. */
208 struct lance_init_block {
209 u16 mode; /* Pre-set mode (reg. 15) */
210 u8 phys_addr[6]; /* Physical ethernet address */
211 u32 filter[2]; /* Multicast filter (unused). */
212 /* Receive and transmit ring base, along with extra bits. */
213 u32 rx_ring; /* Tx and Rx ring base pointers */
214 u32 tx_ring;
215 };
217 struct lance_private {
218 /* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
219 struct lance_rx_head rx_ring[RX_RING_SIZE];
220 struct lance_tx_head tx_ring[TX_RING_SIZE];
221 struct lance_init_block init_block;
222 const char *name;
223 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
224 struct sk_buff* tx_skbuff[TX_RING_SIZE];
225 /* The addresses of receive-in-place skbuffs. */
226 struct sk_buff* rx_skbuff[RX_RING_SIZE];
227 unsigned long rx_buffs; /* Address of Rx and Tx buffers. */
228 /* Tx low-memory "bounce buffer" address. */
229 char (*tx_bounce_buffs)[PKT_BUF_SZ];
230 int cur_rx, cur_tx; /* The next free ring entry */
231 int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
232 int dma;
233 struct net_device_stats stats;
234 unsigned char chip_version; /* See lance_chip_type. */
235 char tx_full;
236 unsigned long lock;
237 };
239 #define LANCE_MUST_PAD 0x00000001
240 #define LANCE_ENABLE_AUTOSELECT 0x00000002
241 #define LANCE_MUST_REINIT_RING 0x00000004
242 #define LANCE_MUST_UNRESET 0x00000008
243 #define LANCE_HAS_MISSED_FRAME 0x00000010
245 /* A mapping from the chip ID number to the part number and features.
246 These are from the datasheets -- in real life the '970 version
247 reportedly has the same ID as the '965. */
248 static struct lance_chip_type {
249 int id_number;
250 const char *name;
251 int flags;
252 } chip_table[] = {
253 {0x0000, "LANCE 7990", /* Ancient lance chip. */
254 LANCE_MUST_PAD + LANCE_MUST_UNRESET},
255 {0x0003, "PCnet/ISA 79C960", /* 79C960 PCnet/ISA. */
256 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
257 LANCE_HAS_MISSED_FRAME},
258 {0x2260, "PCnet/ISA+ 79C961", /* 79C961 PCnet/ISA+, Plug-n-Play. */
259 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
260 LANCE_HAS_MISSED_FRAME},
261 {0x2420, "PCnet/PCI 79C970", /* 79C970 or 79C974 PCnet-SCSI, PCI. */
262 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
263 LANCE_HAS_MISSED_FRAME},
264 /* Bug: the PCnet/PCI actually uses the PCnet/VLB ID number, so just call
265 it the PCnet32. */
266 {0x2430, "PCnet32", /* 79C965 PCnet for VL bus. */
267 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
268 LANCE_HAS_MISSED_FRAME},
269 {0x2621, "PCnet/PCI-II 79C970A", /* 79C970A PCInetPCI II. */
270 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
271 LANCE_HAS_MISSED_FRAME},
272 {0x0, "PCnet (unknown)",
273 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
274 LANCE_HAS_MISSED_FRAME},
275 };
277 enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, PCNET_PCI_II=5, LANCE_UNKNOWN=6};
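/* These enum values index chip_table[] above, so the two must stay in the same
   order: lance_probe1() stores the index in lp->chip_version and the rest of
   the driver tests chip_table[lp->chip_version].flags. */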
279 /* Non-zero only if the current card is a PCI with BIOS-set IRQ. */
280 static unsigned int pci_irq_line = 0;
282 /* Non-zero if lance_probe1() needs to allocate low-memory bounce buffers.
283 Assume yes until we know the memory size. */
284 static unsigned char lance_need_isa_bounce_buffers = 1;
286 static int lance_open(struct net_device *dev);
287 static int lance_open_fail(struct net_device *dev);
288 static void lance_init_ring(struct net_device *dev, int mode);
289 static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev);
290 static int lance_rx(struct net_device *dev);
291 static void lance_interrupt(int irq, void *dev_id, struct pt_regs *regs);
292 static int lance_close(struct net_device *dev);
293 static struct net_device_stats *lance_get_stats(struct net_device *dev);
294 static void set_multicast_list(struct net_device *dev);
298 #ifdef MODULE
299 #define MAX_CARDS 8 /* Max number of interfaces (cards) per module */
300 #define IF_NAMELEN 8 /* # of chars for storing dev->name */
302 static int io[MAX_CARDS] = { 0, };
303 static int dma[MAX_CARDS] = { 0, };
304 static int irq[MAX_CARDS] = { 0, };
306 MODULE_PARM(io, "1-" __MODULE_STRING(MAX_CARDS) "i");
307 MODULE_PARM(dma, "1-" __MODULE_STRING(MAX_CARDS) "i");
308 MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_CARDS) "i");
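/* Typical module usage (illustrative values -- use the jumper/EEPROM settings
   of the cards actually installed):
       insmod lance.o io=0x300,0x320 irq=5,9 dma=5,6
   Autoprobing is refused in module form, so at least io= must be given. */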
310 static char ifnames[MAX_CARDS][IF_NAMELEN] = { {0, }, };
311 static struct net_device dev_lance[MAX_CARDS] =
313 0, /* device name is inserted by linux/drivers/net/net_init.c */
314 0, 0, 0, 0,
315 0, 0,
316 0, 0, 0, NULL, NULL}};
318 int init_module(void)
320 int this_dev, found = 0;
322 for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
323 struct net_device *dev = &dev_lance[this_dev];
324 dev->name = ifnames[this_dev];
325 dev->irq = irq[this_dev];
326 dev->base_addr = io[this_dev];
327 dev->dma = dma[this_dev];
328 dev->init = lance_probe;
329 if (io[this_dev] == 0) {
330 if (this_dev != 0) break; /* only complain once */
331 printk(KERN_NOTICE "lance.c: Module autoprobing not allowed. Append \"io=0xNNN\" value(s).\n");
332 return -EPERM;
334 if (register_netdev(dev) != 0) {
335 printk(KERN_WARNING "lance.c: No PCnet/LANCE card found (i/o = 0x%x).\n", io[this_dev]);
336 if (found != 0) return 0; /* Got at least one. */
337 return -ENXIO;
339 found++;
342 return 0;
345 void cleanup_module(void)
347 int this_dev;
349 for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
350 struct net_device *dev = &dev_lance[this_dev];
351 if (dev->priv != NULL) {
352 kfree(dev->priv);
353 dev->priv = NULL;
354 free_dma(dev->dma);
355 release_region(dev->base_addr, LANCE_TOTAL_SIZE);
356 unregister_netdev(dev);
360 #endif /* MODULE */
362 /* Starting in v2.1.*, the LANCE/PCnet probe is now similar to the other
363 board probes now that kmalloc() can allocate ISA DMA-able regions.
364 This also allows the LANCE driver to be used as a module.
365 */
366 int lance_probe(struct net_device *dev)
368 int *port, result;
370 if (high_memory <= phys_to_virt(16*1024*1024))
371 lance_need_isa_bounce_buffers = 0;
373 #if defined(CONFIG_PCI)
374 if (pci_present())
376 struct pci_dev *pdev = NULL;
377 if (lance_debug > 1)
378 printk("lance.c: PCI bios is present, checking for devices...\n");
380 while ((pdev = pci_find_device(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE, pdev))) {
381 unsigned int pci_ioaddr;
382 unsigned short pci_command;
384 pci_irq_line = pdev->irq;
385 pci_ioaddr = pdev->resource[0].start;
386 /* PCI Spec 2.1 states that it is either the driver or PCI card's
387 * responsibility to set the PCI Master Enable Bit if needed.
388 * (From Mark Stockton <marks@schooner.sys.hou.compaq.com>)
389 */
390 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
391 if ( ! (pci_command & PCI_COMMAND_MASTER)) {
392 printk("PCI Master Bit has not been set. Setting...\n");
393 pci_command |= PCI_COMMAND_MASTER;
394 pci_write_config_word(pdev, PCI_COMMAND, pci_command);
396 printk("Found PCnet/PCI at %#x, irq %d.\n",
397 pci_ioaddr, pci_irq_line);
398 result = lance_probe1(dev, pci_ioaddr, pci_irq_line, 0);
399 pci_irq_line = 0;
400 if (!result) return 0;
403 #endif /* defined(CONFIG_PCI) */
405 for (port = lance_portlist; *port; port++) {
406 int ioaddr = *port;
408 if ( check_region(ioaddr, LANCE_TOTAL_SIZE) == 0) {
409 /* Detect "normal" 0x57 0x57 and the NI6510EB 0x52 0x44
410 signatures w/ minimal I/O reads */
411 char offset15, offset14 = inb(ioaddr + 14);
413 if ((offset14 == 0x52 || offset14 == 0x57) &&
414 ((offset15 = inb(ioaddr + 15)) == 0x57 || offset15 == 0x44)) {
415 result = lance_probe1(dev, ioaddr, 0, 0);
416 if ( !result ) return 0;
420 return -ENODEV;
423 int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int options)
425 struct lance_private *lp;
426 short dma_channels; /* Mark spuriously-busy DMA channels */
427 int i, reset_val, lance_version;
428 const char *chipname;
429 /* Flags for specific chips or boards. */
430 unsigned char hpJ2405A = 0; /* HP ISA adaptor */
431 int hp_builtin = 0; /* HP on-board ethernet. */
432 static int did_version = 0; /* Already printed version info. */
433 unsigned long flags;
435 /* First we look for special cases.
436 Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
437 There are two HP versions; check the BIOS for the configuration port.
438 This method provided by L. Julliard, Laurent_Julliard@grenoble.hp.com.
439 */
440 if (readw(0x000f0102) == 0x5048) {
441 static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360};
442 int hp_port = (readl(0x000f00f1) & 1) ? 0x499 : 0x99;
443 /* We can have boards other than the built-in! Verify this is on-board. */
444 if ((inb(hp_port) & 0xc0) == 0x80
445 && ioaddr_table[inb(hp_port) & 3] == ioaddr)
446 hp_builtin = hp_port;
448 /* We also recognize the HP Vectra on-board here, but check below. */
449 hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00
450 && inb(ioaddr+2) == 0x09);
452 /* Reset the LANCE. */
453 reset_val = inw(ioaddr+LANCE_RESET); /* Reset the LANCE */
455 /* The un-reset is only needed for the real NE2100, and will
456 confuse the HP board. */
457 if (!hpJ2405A)
458 outw(reset_val, ioaddr+LANCE_RESET);
460 outw(0x0000, ioaddr+LANCE_ADDR); /* Switch to window 0 */
461 if (inw(ioaddr+LANCE_DATA) != 0x0004)
462 return -ENODEV;
464 /* Get the version of the chip. */
465 outw(88, ioaddr+LANCE_ADDR);
466 if (inw(ioaddr+LANCE_ADDR) != 88) {
467 lance_version = 0;
468 } else { /* Good, it's a newer chip. */
469 int chip_version = inw(ioaddr+LANCE_DATA);
470 outw(89, ioaddr+LANCE_ADDR);
471 chip_version |= inw(ioaddr+LANCE_DATA) << 16;
472 if (lance_debug > 2)
473 printk(" LANCE chip version is %#x.\n", chip_version);
474 if ((chip_version & 0xfff) != 0x003)
475 return -ENODEV;
476 chip_version = (chip_version >> 12) & 0xffff;
477 for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) {
478 if (chip_table[lance_version].id_number == chip_version)
479 break;
483 /* We can't use init_etherdev() to allocate dev->priv because it must
484 be an ISA DMA-able region. */
485 dev = init_etherdev(dev, 0);
486 dev->open = lance_open_fail;
487 chipname = chip_table[lance_version].name;
488 printk("%s: %s at %#3x,", dev->name, chipname, ioaddr);
490 /* There is a 16 byte station address PROM at the base address.
491 The first six bytes are the station address. */
492 for (i = 0; i < 6; i++)
493 printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i));
495 dev->base_addr = ioaddr;
496 request_region(ioaddr, LANCE_TOTAL_SIZE, chip_table[lance_version].name);
498 /* Make certain the data structures used by the LANCE are aligned and DMAble. */
500 lp = (struct lance_private *)(((unsigned long)kmalloc(sizeof(*lp)+7,
501 GFP_DMA | GFP_KERNEL)+7) & ~7);
502 if(lp==NULL)
503 return -ENODEV;
504 if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
505 memset(lp, 0, sizeof(*lp));
506 dev->priv = lp;
507 lp->name = chipname;
508 lp->rx_buffs = (unsigned long)kmalloc(PKT_BUF_SZ*RX_RING_SIZE,
509 GFP_DMA | GFP_KERNEL);
510 if (lance_need_isa_bounce_buffers)
511 lp->tx_bounce_buffs = kmalloc(PKT_BUF_SZ*TX_RING_SIZE,
512 GFP_DMA | GFP_KERNEL);
513 else
514 lp->tx_bounce_buffs = NULL;
516 lp->chip_version = lance_version;
518 lp->init_block.mode = 0x0003; /* Disable Rx and Tx. */
519 for (i = 0; i < 6; i++)
520 lp->init_block.phys_addr[i] = dev->dev_addr[i];
521 lp->init_block.filter[0] = 0x00000000;
522 lp->init_block.filter[1] = 0x00000000;
523 lp->init_block.rx_ring = ((u32)virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
524 lp->init_block.tx_ring = ((u32)virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
526 outw(0x0001, ioaddr+LANCE_ADDR);
527 inw(ioaddr+LANCE_ADDR);
528 outw((short) (u32) virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
529 outw(0x0002, ioaddr+LANCE_ADDR);
530 inw(ioaddr+LANCE_ADDR);
531 outw(((u32)virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
532 outw(0x0000, ioaddr+LANCE_ADDR);
533 inw(ioaddr+LANCE_ADDR);
535 if (irq) { /* Set iff PCI card. */
536 dev->dma = 4; /* Native bus-master, no DMA channel needed. */
537 dev->irq = irq;
538 } else if (hp_builtin) {
539 static const char dma_tbl[4] = {3, 5, 6, 0};
540 static const char irq_tbl[4] = {3, 4, 5, 9};
541 unsigned char port_val = inb(hp_builtin);
542 dev->dma = dma_tbl[(port_val >> 4) & 3];
543 dev->irq = irq_tbl[(port_val >> 2) & 3];
544 printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma);
545 } else if (hpJ2405A) {
546 static const char dma_tbl[4] = {3, 5, 6, 7};
547 static const char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
548 short reset_val = inw(ioaddr+LANCE_RESET);
549 dev->dma = dma_tbl[(reset_val >> 2) & 3];
550 dev->irq = irq_tbl[(reset_val >> 4) & 7];
551 printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
552 } else if (lance_version == PCNET_ISAP) { /* The plug-n-play version. */
553 short bus_info;
554 outw(8, ioaddr+LANCE_ADDR);
555 bus_info = inw(ioaddr+LANCE_BUS_IF);
556 dev->dma = bus_info & 0x07;
557 dev->irq = (bus_info >> 4) & 0x0F;
558 } else {
559 /* The DMA channel may be passed in PARAM1. */
560 if (dev->mem_start & 0x07)
561 dev->dma = dev->mem_start & 0x07;
564 if (dev->dma == 0) {
565 /* Read the DMA channel status register, so that we can avoid
566 stuck DMA channels in the DMA detection below. */
567 dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
568 (inb(DMA2_STAT_REG) & 0xf0);
570 if (dev->irq >= 2)
571 printk(" assigned IRQ %d", dev->irq);
572 else if (lance_version != 0) { /* 7990 boards need DMA detection first. */
573 /* To auto-IRQ we enable the initialization-done and DMA error
574 interrupts. For ISA boards we get a DMA error, but VLB and PCI
575 boards will work. */
576 autoirq_setup(0);
578 /* Trigger an initialization just for the interrupt. */
579 outw(0x0041, ioaddr+LANCE_DATA);
581 dev->irq = autoirq_report(2);
582 if (dev->irq)
583 printk(", probed IRQ %d", dev->irq);
584 else {
585 printk(", failed to detect IRQ line.\n");
586 return -ENODEV;
589 /* Check for the initialization done bit, 0x0100, which means
590 that we don't need a DMA channel. */
591 if (inw(ioaddr+LANCE_DATA) & 0x0100)
592 dev->dma = 4;
595 if (dev->dma == 4) {
596 printk(", no DMA needed.\n");
597 } else if (dev->dma) {
598 if (request_dma(dev->dma, chipname)) {
599 printk("DMA %d allocation failed.\n", dev->dma);
600 return -ENODEV;
601 } else
602 printk(", assigned DMA %d.\n", dev->dma);
603 } else { /* OK, we have to auto-DMA. */
604 for (i = 0; i < 4; i++) {
605 static const char dmas[] = { 5, 6, 7, 3 };
606 int dma = dmas[i];
607 int boguscnt;
609 /* Don't enable a permanently busy DMA channel, or the machine
610 will hang. */
611 if (test_bit(dma, &dma_channels))
612 continue;
613 outw(0x7f04, ioaddr+LANCE_DATA); /* Clear the memory error bits. */
614 if (request_dma(dma, chipname))
615 continue;
617 flags=claim_dma_lock();
618 set_dma_mode(dma, DMA_MODE_CASCADE);
619 enable_dma(dma);
620 release_dma_lock(flags);
622 /* Trigger an initialization. */
623 outw(0x0001, ioaddr+LANCE_DATA);
624 for (boguscnt = 100; boguscnt > 0; --boguscnt)
625 if (inw(ioaddr+LANCE_DATA) & 0x0900)
626 break;
627 if (inw(ioaddr+LANCE_DATA) & 0x0100) {
628 dev->dma = dma;
629 printk(", DMA %d.\n", dev->dma);
630 break;
631 } else {
632 flags=claim_dma_lock();
633 disable_dma(dma);
634 release_dma_lock(flags);
635 free_dma(dma);
638 if (i == 4) { /* Failure: bail. */
639 printk("DMA detection failed.\n");
640 return -ENODEV;
644 if (lance_version == 0 && dev->irq == 0) {
645 /* We may auto-IRQ now that we have a DMA channel. */
646 /* Trigger an initialization just for the interrupt. */
647 autoirq_setup(0);
648 outw(0x0041, ioaddr+LANCE_DATA);
650 dev->irq = autoirq_report(4);
651 if (dev->irq == 0) {
652 printk(" Failed to detect the 7990 IRQ line.\n");
653 return -ENODEV;
655 printk(" Auto-IRQ detected IRQ%d.\n", dev->irq);
658 if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
659 /* Turn on auto-select of media (10baseT or BNC) so that the user
660 can watch the LEDs even if the board isn't opened. */
661 outw(0x0002, ioaddr+LANCE_ADDR);
662 /* Don't touch 10base2 power bit. */
663 outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
666 if (lance_debug > 0 && did_version++ == 0)
667 printk(version);
669 /* The LANCE-specific entries in the device structure. */
670 dev->open = lance_open;
671 dev->hard_start_xmit = lance_start_xmit;
672 dev->stop = lance_close;
673 dev->get_stats = lance_get_stats;
674 dev->set_multicast_list = set_multicast_list;
676 return 0;
679 static int
680 lance_open_fail(struct net_device *dev)
682 return -ENODEV;
687 static int
688 lance_open(struct net_device *dev)
690 struct lance_private *lp = (struct lance_private *)dev->priv;
691 int ioaddr = dev->base_addr;
692 int i;
694 if (dev->irq == 0 ||
695 request_irq(dev->irq, &lance_interrupt, 0, lp->name, dev)) {
696 return -EAGAIN;
699 MOD_INC_USE_COUNT;
701 /* We used to allocate DMA here, but that was silly.
702 DMA lines can't be shared! We now permanently allocate them. */
704 /* Reset the LANCE */
705 inw(ioaddr+LANCE_RESET);
707 /* The DMA controller is used as a no-operation slave, "cascade mode". */
708 if (dev->dma != 4) {
709 unsigned long flags=claim_dma_lock();
710 enable_dma(dev->dma);
711 set_dma_mode(dev->dma, DMA_MODE_CASCADE);
712 release_dma_lock(flags);
715 /* Un-Reset the LANCE, needed only for the NE2100. */
716 if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET)
717 outw(0, ioaddr+LANCE_RESET);
719 if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
720 /* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */
721 outw(0x0002, ioaddr+LANCE_ADDR);
722 /* Only touch autoselect bit. */
723 outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
726 if (lance_debug > 1)
727 printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
728 dev->name, dev->irq, dev->dma,
729 (u32) virt_to_bus(lp->tx_ring),
730 (u32) virt_to_bus(lp->rx_ring),
731 (u32) virt_to_bus(&lp->init_block));
733 lance_init_ring(dev, GFP_KERNEL);
734 /* Re-initialize the LANCE, and start it when done. */
735 outw(0x0001, ioaddr+LANCE_ADDR);
736 outw((short) (u32) virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
737 outw(0x0002, ioaddr+LANCE_ADDR);
738 outw(((u32)virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
740 outw(0x0004, ioaddr+LANCE_ADDR);
741 outw(0x0915, ioaddr+LANCE_DATA);
743 outw(0x0000, ioaddr+LANCE_ADDR);
744 outw(0x0001, ioaddr+LANCE_DATA);
746 dev->tbusy = 0;
747 dev->interrupt = 0;
748 dev->start = 1;
749 i = 0;
750 while (i++ < 100)
751 if (inw(ioaddr+LANCE_DATA) & 0x0100)
752 break;
753 /*
754 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
755 * reports that doing so triggers a bug in the '974.
756 */
757 outw(0x0042, ioaddr+LANCE_DATA);
759 if (lance_debug > 2)
760 printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
761 dev->name, i, (u32) virt_to_bus(&lp->init_block), inw(ioaddr+LANCE_DATA));
763 return 0; /* Always succeed */
766 /* The LANCE has been halted for one reason or another (busmaster memory
767 arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
768 etc.). Modern LANCE variants always reload their ring-buffer
769 configuration when restarted, so we must reinitialize our ring
770 context before restarting. As part of this reinitialization,
771 find all packets still on the Tx ring and pretend that they had been
772 sent (in effect, drop the packets on the floor) - the higher-level
773 protocols will time out and retransmit. It'd be better to shuffle
774 these skbs to a temp list and then actually re-Tx them after
775 restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
776 */
778 static void
779 lance_purge_tx_ring(struct net_device *dev)
781 struct lance_private *lp = (struct lance_private *)dev->priv;
782 int i;
784 for (i = 0; i < TX_RING_SIZE; i++) {
785 if (lp->tx_skbuff[i]) {
786 dev_kfree_skb(lp->tx_skbuff[i]);
787 lp->tx_skbuff[i] = NULL;
793 /* Initialize the LANCE Rx and Tx rings. */
794 static void
795 lance_init_ring(struct net_device *dev, int gfp)
797 struct lance_private *lp = (struct lance_private *)dev->priv;
798 int i;
800 lp->lock = 0, lp->tx_full = 0;
801 lp->cur_rx = lp->cur_tx = 0;
802 lp->dirty_rx = lp->dirty_tx = 0;
804 for (i = 0; i < RX_RING_SIZE; i++) {
805 struct sk_buff *skb;
806 void *rx_buff;
808 skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp);
809 lp->rx_skbuff[i] = skb;
810 if (skb) {
811 skb->dev = dev;
812 rx_buff = skb->tail;
813 } else
814 rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
815 if (rx_buff == NULL)
816 lp->rx_ring[i].base = 0;
817 else
818 lp->rx_ring[i].base = (u32)virt_to_bus(rx_buff) | 0x80000000;
819 lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
821 /* The Tx buffer address is filled in as needed, but we do need to clear
822 the upper ownership bit. */
823 for (i = 0; i < TX_RING_SIZE; i++) {
824 lp->tx_skbuff[i] = 0;
825 lp->tx_ring[i].base = 0;
828 lp->init_block.mode = 0x0000;
829 for (i = 0; i < 6; i++)
830 lp->init_block.phys_addr[i] = dev->dev_addr[i];
831 lp->init_block.filter[0] = 0x00000000;
832 lp->init_block.filter[1] = 0x00000000;
833 lp->init_block.rx_ring = ((u32)virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
834 lp->init_block.tx_ring = ((u32)virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
837 static void
838 lance_restart(struct net_device *dev, unsigned int csr0_bits, int must_reinit)
840 struct lance_private *lp = (struct lance_private *)dev->priv;
842 if (must_reinit ||
843 (chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) {
844 lance_purge_tx_ring(dev);
845 lance_init_ring(dev, GFP_ATOMIC);
847 outw(0x0000, dev->base_addr + LANCE_ADDR);
848 outw(csr0_bits, dev->base_addr + LANCE_DATA);
851 static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
853 struct lance_private *lp = (struct lance_private *)dev->priv;
854 int ioaddr = dev->base_addr;
855 int entry;
856 unsigned long flags;
858 /* Transmitter timeout, serious problems. */
859 if (dev->tbusy) {
860 int tickssofar = jiffies - dev->trans_start;
861 if (tickssofar < 20)
862 return 1;
863 outw(0, ioaddr+LANCE_ADDR);
864 printk("%s: transmit timed out, status %4.4x, resetting.\n",
865 dev->name, inw(ioaddr+LANCE_DATA));
866 outw(0x0004, ioaddr+LANCE_DATA);
867 lp->stats.tx_errors++;
868 #ifndef final_version
870 int i;
871 printk(" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
872 lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
873 lp->cur_rx);
874 for (i = 0 ; i < RX_RING_SIZE; i++)
875 printk("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
876 lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
877 lp->rx_ring[i].msg_length);
878 for (i = 0 ; i < TX_RING_SIZE; i++)
879 printk("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
880 lp->tx_ring[i].base, -lp->tx_ring[i].length,
881 lp->tx_ring[i].misc);
882 printk("\n");
884 #endif
885 lance_restart(dev, 0x0043, 1);
887 dev->tbusy=0;
888 dev->trans_start = jiffies;
890 return 0;
893 if (lance_debug > 3) {
894 outw(0x0000, ioaddr+LANCE_ADDR);
895 printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
896 inw(ioaddr+LANCE_DATA));
897 outw(0x0000, ioaddr+LANCE_DATA);
900 /* Block a timer-based transmit from overlapping. This could better be
901 done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
902 if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
903 printk("%s: Transmitter access conflict.\n", dev->name);
904 return 1;
907 if (test_and_set_bit(0, (void*)&lp->lock) != 0) {
908 if (lance_debug > 0)
909 printk("%s: tx queue lock!\n", dev->name);
910 /* don't clear dev->tbusy flag. */
911 return 1;
914 /* Fill in a Tx ring entry */
916 /* Mask to ring buffer boundary. */
917 entry = lp->cur_tx & TX_RING_MOD_MASK;
919 /* Caution: the write order is important here, set the base address
920 with the "ownership" bits last. */
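/* In the descriptor writes below, 0x83000000 is the OWN bit (0x80000000) plus
   the STP and ENP bits (0x03000000) marking this buffer as both the start and
   the end of the packet. */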
922 /* The old LANCE chips don't automatically pad buffers to the minimum size. */
923 if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) {
924 lp->tx_ring[entry].length =
925 -(ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN);
926 } else
927 lp->tx_ring[entry].length = -skb->len;
929 lp->tx_ring[entry].misc = 0x0000;
lp->stats.tx_bytes += skb->len; /* account here: the bounce path below frees the skb */
931 /* If any part of this buffer is >16M we must copy it to a low-memory
932 buffer. */
933 if ((u32)virt_to_bus(skb->data) + skb->len > 0x01000000) {
934 if (lance_debug > 5)
935 printk("%s: bouncing a high-memory packet (%#x).\n",
936 dev->name, (u32)virt_to_bus(skb->data));
937 memcpy(&lp->tx_bounce_buffs[entry], skb->data, skb->len);
938 lp->tx_ring[entry].base =
939 ((u32)virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
940 dev_kfree_skb(skb);
941 } else {
942 lp->tx_skbuff[entry] = skb;
943 lp->tx_ring[entry].base = ((u32)virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
945 lp->cur_tx++;
948 /* Trigger an immediate send poll. */
949 outw(0x0000, ioaddr+LANCE_ADDR);
950 outw(0x0048, ioaddr+LANCE_DATA);
952 dev->trans_start = jiffies;
954 save_flags(flags);
955 cli();
956 lp->lock = 0;
957 if (lp->tx_ring[(entry+1) & TX_RING_MOD_MASK].base == 0)
958 dev->tbusy=0;
959 else
960 lp->tx_full = 1;
961 restore_flags(flags);
963 return 0;
966 /* The LANCE interrupt handler. */
967 static void
968 lance_interrupt(int irq, void *dev_id, struct pt_regs * regs)
970 struct net_device *dev = dev_id;
971 struct lance_private *lp;
972 int csr0, ioaddr, boguscnt=10;
973 int must_restart;
975 if (dev == NULL) {
976 printk ("lance_interrupt(): irq %d for unknown device.\n", irq);
977 return;
980 ioaddr = dev->base_addr;
981 lp = (struct lance_private *)dev->priv;
982 if (dev->interrupt)
983 printk(KERN_WARNING "%s: Re-entering the interrupt handler.\n", dev->name);
985 dev->interrupt = 1;
987 outw(0x00, dev->base_addr + LANCE_ADDR);
988 while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600
989 && --boguscnt >= 0) {
990 /* Acknowledge all of the current interrupt sources ASAP. */
991 outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);
993 must_restart = 0;
995 if (lance_debug > 5)
996 printk("%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
997 dev->name, csr0, inw(dev->base_addr + LANCE_DATA));
999 if (csr0 & 0x0400) /* Rx interrupt */
1000 lance_rx(dev);
1002 if (csr0 & 0x0200) { /* Tx-done interrupt */
1003 int dirty_tx = lp->dirty_tx;
1005 while (dirty_tx < lp->cur_tx) {
1006 int entry = dirty_tx & TX_RING_MOD_MASK;
1007 int status = lp->tx_ring[entry].base;
1009 if (status < 0)
1010 break; /* It still hasn't been Txed */
1012 lp->tx_ring[entry].base = 0;
1014 if (status & 0x40000000) {
1015 /* There was a major error, log it. */
1016 int err_status = lp->tx_ring[entry].misc;
1017 lp->stats.tx_errors++;
1018 if (err_status & 0x0400) lp->stats.tx_aborted_errors++;
1019 if (err_status & 0x0800) lp->stats.tx_carrier_errors++;
1020 if (err_status & 0x1000) lp->stats.tx_window_errors++;
1021 if (err_status & 0x4000) {
1022 /* Ackk! On FIFO errors the Tx unit is turned off! */
1023 lp->stats.tx_fifo_errors++;
1024 /* Remove this verbosity later! */
1025 printk("%s: Tx FIFO error! Status %4.4x.\n",
1026 dev->name, csr0);
1027 /* Restart the chip. */
1028 must_restart = 1;
1030 } else {
1031 if (status & 0x18000000)
1032 lp->stats.collisions++;
1033 lp->stats.tx_packets++;
1036 /* We must free the original skb if it's not a data-only copy
1037 in the bounce buffer. */
1038 if (lp->tx_skbuff[entry]) {
1039 dev_kfree_skb(lp->tx_skbuff[entry]);
1040 lp->tx_skbuff[entry] = 0;
1042 dirty_tx++;
1045 #ifndef final_version
1046 if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
1047 printk("out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
1048 dirty_tx, lp->cur_tx, lp->tx_full);
1049 dirty_tx += TX_RING_SIZE;
1051 #endif
1053 if (lp->tx_full && dev->tbusy
1054 && dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
1055 /* The ring is no longer full, clear tbusy. */
1056 lp->tx_full = 0;
1057 dev->tbusy = 0;
1058 mark_bh(NET_BH);
1061 lp->dirty_tx = dirty_tx;
1064 /* Log misc errors. */
1065 if (csr0 & 0x4000) lp->stats.tx_errors++; /* Tx babble. */
1066 if (csr0 & 0x1000) lp->stats.rx_errors++; /* Missed a Rx frame. */
1067 if (csr0 & 0x0800) {
1068 printk("%s: Bus master arbitration failure, status %4.4x.\n",
1069 dev->name, csr0);
1070 /* Restart the chip. */
1071 must_restart = 1;
1074 if (must_restart) {
1075 /* stop the chip to clear the error condition, then restart */
1076 outw(0x0000, dev->base_addr + LANCE_ADDR);
1077 outw(0x0004, dev->base_addr + LANCE_DATA);
1078 lance_restart(dev, 0x0002, 0);
1082 /* Clear any other interrupt, and set interrupt enable. */
1083 outw(0x0000, dev->base_addr + LANCE_ADDR);
1084 outw(0x7940, dev->base_addr + LANCE_DATA);
1086 if (lance_debug > 4)
1087 printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
1088 dev->name, inw(ioaddr + LANCE_ADDR),
1089 inw(dev->base_addr + LANCE_DATA));
1091 dev->interrupt = 0;
1092 return;
1095 static int
1096 lance_rx(struct net_device *dev)
1098 struct lance_private *lp = (struct lance_private *)dev->priv;
1099 int entry = lp->cur_rx & RX_RING_MOD_MASK;
1100 int i;
1102 /* If we own the next entry, it's a new packet. Send it up. */
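/* 'status' below is the descriptor's high byte: 0x80 OWN, 0x40 ERR, 0x20 FRAM,
   0x10 OFLO, 0x08 CRC, 0x04 BUFF, 0x02 STP, 0x01 ENP. A good frame that fits
   in a single buffer therefore reads exactly 0x03 (STP|ENP). */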
1103 while (lp->rx_ring[entry].base >= 0) {
1104 int status = lp->rx_ring[entry].base >> 24;
1106 if (status != 0x03) { /* There was an error. */
1107 /* There is a tricky error noted by John Murphy,
1108 <murf@perftech.com> to Russ Nelson: Even with full-sized
1109 buffers it's possible for a jabber packet to use two
1110 buffers, with only the last correctly noting the error. */
1111 if (status & 0x01) /* Only count a general error at the */
1112 lp->stats.rx_errors++; /* end of a packet.*/
1113 if (status & 0x20) lp->stats.rx_frame_errors++;
1114 if (status & 0x10) lp->stats.rx_over_errors++;
1115 if (status & 0x08) lp->stats.rx_crc_errors++;
1116 if (status & 0x04) lp->stats.rx_fifo_errors++;
1117 lp->rx_ring[entry].base &= 0x03ffffff;
1119 else
1121 /* Malloc up new buffer, compatible with net3. */
1122 short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4;
1123 struct sk_buff *skb;
1125 if(pkt_len<60)
1127 printk("%s: Runt packet!\n",dev->name);
1128 lp->stats.rx_errors++;
1130 else
1132 skb = dev_alloc_skb(pkt_len+2);
1133 if (skb == NULL)
1135 printk("%s: Memory squeeze, deferring packet.\n", dev->name);
1136 for (i=0; i < RX_RING_SIZE; i++)
1137 if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
1138 break;
1140 if (i > RX_RING_SIZE -2)
1142 lp->stats.rx_dropped++;
1143 lp->rx_ring[entry].base |= 0x80000000;
1144 lp->cur_rx++;
1146 break;
1148 skb->dev = dev;
1149 skb_reserve(skb,2); /* 16 byte align */
1150 skb_put(skb,pkt_len); /* Make room */
1151 eth_copy_and_sum(skb,
1152 (unsigned char *)bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)),
1153 pkt_len,0);
1154 lp->stats.rx_bytes+=skb->len;
1155 skb->protocol=eth_type_trans(skb,dev);
1156 lp->stats.rx_packets++;
1157 netif_rx(skb);
1160 /* The docs say that the buffer length isn't touched, but Andrew Boyd
1161 of QNX reports that some revs of the 79C965 clear it. */
1162 lp->rx_ring[entry].buf_length = -PKT_BUF_SZ;
1163 lp->rx_ring[entry].base |= 0x80000000;
1164 entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
1167 /* We should check that at least two ring entries are free. If not,
1168 we should free one and mark stats->rx_dropped++. */
1170 return 0;
1173 static int
1174 lance_close(struct net_device *dev)
1176 int ioaddr = dev->base_addr;
1177 struct lance_private *lp = (struct lance_private *)dev->priv;
1178 int i;
1180 dev->start = 0;
1181 dev->tbusy = 1;
1183 if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
1184 outw(112, ioaddr+LANCE_ADDR);
1185 lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
1187 outw(0, ioaddr+LANCE_ADDR);
1189 if (lance_debug > 1)
1190 printk("%s: Shutting down ethercard, status was %2.2x.\n",
1191 dev->name, inw(ioaddr+LANCE_DATA));
1193 /* We stop the LANCE here -- it occasionally polls
1194 memory if we don't. */
1195 outw(0x0004, ioaddr+LANCE_DATA);
1197 if (dev->dma != 4)
1199 unsigned long flags=claim_dma_lock();
1200 disable_dma(dev->dma);
1201 release_dma_lock(flags);
1203 free_irq(dev->irq, dev);
1205 /* Free all the skbuffs in the Rx and Tx queues. */
1206 for (i = 0; i < RX_RING_SIZE; i++) {
1207 struct sk_buff *skb = lp->rx_skbuff[i];
1208 lp->rx_skbuff[i] = 0;
1209 lp->rx_ring[i].base = 0; /* Not owned by LANCE chip. */
1210 if (skb)
1211 dev_kfree_skb(skb);
1213 for (i = 0; i < TX_RING_SIZE; i++) {
1214 if (lp->tx_skbuff[i])
1215 dev_kfree_skb(lp->tx_skbuff[i]);
1216 lp->tx_skbuff[i] = 0;
1219 MOD_DEC_USE_COUNT;
1220 return 0;
1223 static struct net_device_stats *lance_get_stats(struct net_device *dev)
1225 struct lance_private *lp = (struct lance_private *)dev->priv;
1226 short ioaddr = dev->base_addr;
1227 short saved_addr;
1228 unsigned long flags;
1230 if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
1231 save_flags(flags);
1232 cli();
1233 saved_addr = inw(ioaddr+LANCE_ADDR);
1234 outw(112, ioaddr+LANCE_ADDR);
1235 lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
1236 outw(saved_addr, ioaddr+LANCE_ADDR);
1237 restore_flags(flags);
1240 return &lp->stats;
1243 /* Set or clear the multicast filter for this adaptor.
1244 */
1246 static void set_multicast_list(struct net_device *dev)
1248 short ioaddr = dev->base_addr;
1250 outw(0, ioaddr+LANCE_ADDR);
1251 outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance. */
1253 if (dev->flags&IFF_PROMISC) {
1254 /* Log any net taps. */
1255 printk("%s: Promiscuous mode enabled.\n", dev->name);
1256 outw(15, ioaddr+LANCE_ADDR);
1257 outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */
1258 } else {
1259 short multicast_table[4];
1260 int i;
1261 int num_addrs=dev->mc_count;
1262 if(dev->flags&IFF_ALLMULTI)
1263 num_addrs=1;
1264 /* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */
1265 memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
1266 for (i = 0; i < 4; i++) {
1267 outw(8 + i, ioaddr+LANCE_ADDR);
1268 outw(multicast_table[i], ioaddr+LANCE_DATA);
1270 outw(15, ioaddr+LANCE_ADDR);
1271 outw(0x0000, ioaddr+LANCE_DATA); /* Unset promiscuous mode */
1274 lance_restart(dev, 0x0142, 0); /* Resume normal operation */