pci: use pci_ioremap_bar() in drivers/edac
[linux-2.6/x86.git] / drivers / net / 3c527.c
blob2df3af3b9b20130ead927f93db05730029430d9c
1 /* 3c527.c: 3Com Etherlink/MC32 driver for Linux 2.4 and 2.6.
3 * (c) Copyright 1998 Red Hat Software Inc
4 * Written by Alan Cox.
5 * Further debugging by Carl Drougge.
6 * Initial SMP support by Felipe W Damasio <felipewd@terra.com.br>
7 * Heavily modified by Richard Procter <rnp@paradise.net.nz>
9 * Based on skeleton.c written 1993-94 by Donald Becker and ne2.c
10 * (for the MCA stuff) written by Wim Dumon.
12 * Thanks to 3Com for making this possible by providing me with the
13 * documentation.
15 * This software may be used and distributed according to the terms
16 * of the GNU General Public License, incorporated herein by reference.
/* Driver identity; these feed the version banner and resource claims. */
#define DRV_NAME		"3c527"
#define DRV_VERSION		"0.7-SMP"
#define DRV_RELDATE		"2003/09/21"

/* One-shot banner, printed at first successful probe when debugging. */
static const char *version =
DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Richard Procter <rnp@paradise.net.nz>\n";
27 /**
28 * DOC: Traps for the unwary
30 * The diagram (Figure 1-1) and the POS summary disagree with the
31 * "Interrupt Level" section in the manual.
33 * The manual contradicts itself when describing the minimum number
34 * buffers in the 'configure lists' command.
35 * My card accepts a buffer config of 4/4.
37 * Setting the SAV BP bit does not save bad packets, but
38 * only enables RX on-card stats collection.
40 * The documentation in places seems to miss things. In actual fact
41 * I've always eventually found everything is documented, it just
42 * requires careful study.
44 * DOC: Theory Of Operation
46 * The 3com 3c527 is a 32bit MCA bus mastering adapter with a large
47 * amount of on board intelligence that housekeeps a somewhat dumber
48 * Intel NIC. For performance we want to keep the transmit queue deep
49 * as the card can transmit packets while fetching others from main
50 * memory by bus master DMA. Transmission and reception are driven by
51 * circular buffer queues.
53 * The mailboxes can be used for controlling how the card traverses
 * its buffer rings, but are used only for initial setup in this
55 * implementation. The exec mailbox allows a variety of commands to
56 * be executed. Each command must complete before the next is
57 * executed. Primarily we use the exec mailbox for controlling the
58 * multicast lists. We have to do a certain amount of interesting
59 * hoop jumping as the multicast list changes can occur in interrupt
60 * state when the card has an exec command pending. We defer such
61 * events until the command completion interrupt.
63 * A copy break scheme (taken from 3c59x.c) is employed whereby
64 * received frames exceeding a configurable length are passed
 * directly to the higher networking layers without incurring a copy,
66 * in what amounts to a time/space trade-off.
68 * The card also keeps a large amount of statistical information
69 * on-board. In a perfect world, these could be used safely at no
70 * cost. However, lacking information to the contrary, processing
71 * them without races would involve so much extra complexity as to
72 * make it unworthwhile to do so. In the end, a hybrid SW/HW
73 * implementation was made necessary --- see mc32_update_stats().
75 * DOC: Notes
77 * It should be possible to use two or more cards, but at this stage
78 * only by loading two copies of the same module.
80 * The on-board 82586 NIC has trouble receiving multiple
81 * back-to-back frames and so is likely to drop packets from fast
82 * senders.
83 **/
85 #include <linux/module.h>
87 #include <linux/errno.h>
88 #include <linux/netdevice.h>
89 #include <linux/etherdevice.h>
90 #include <linux/if_ether.h>
91 #include <linux/init.h>
92 #include <linux/kernel.h>
93 #include <linux/types.h>
94 #include <linux/fcntl.h>
95 #include <linux/interrupt.h>
96 #include <linux/mca-legacy.h>
97 #include <linux/ioport.h>
98 #include <linux/in.h>
99 #include <linux/skbuff.h>
100 #include <linux/slab.h>
101 #include <linux/string.h>
102 #include <linux/wait.h>
103 #include <linux/ethtool.h>
104 #include <linux/completion.h>
105 #include <linux/bitops.h>
106 #include <linux/semaphore.h>
108 #include <asm/uaccess.h>
109 #include <asm/system.h>
110 #include <asm/io.h>
111 #include <asm/dma.h>
113 #include "3c527.h"
115 MODULE_LICENSE("GPL");
/*
 * The name of the card. Is used for messages and in the requests for
 * io regions, irqs and dma channels
 */
static const char* cardname = DRV_NAME;

/* use 0 for production, 1 for verification, >2 for debug */
#ifndef NET_DEBUG
#define NET_DEBUG 2
#endif

/* Define to trace interrupt servicing (very noisy); off by default. */
#undef DEBUG_IRQ

/* Runtime verbosity level, seeded from NET_DEBUG above. */
static unsigned int mc32_debug = NET_DEBUG;
/* The number of low I/O ports used by the ethercard. */
#define MC32_IO_EXTENT	8

/* As implemented, values must be a power-of-2 -- 4/8/16/32
 * (the ring index helpers below rely on mask arithmetic). */
#define TX_RING_LEN	32	/* Typically the card supports 37 */
#define RX_RING_LEN	8	/*       "       "        "      */

/* Copy break point, see above for details.
 * Setting to > 1512 effectively disables this feature. */
#define RX_COPYBREAK	200	/* Value from 3c59x.c */

/* Issue the 82586 workaround command - this is for "busy lans", but
 * basically means for all lans nowadays - has a performance (latency)
 * cost, but best set. */
static const int WORKAROUND_82586=1;
/* Pointers to buffers and their on-card records */
struct mc32_ring_desc
{
	volatile struct skb_header *p;	/* Card-visible descriptor in shared memory */
	struct sk_buff *skb;		/* Host buffer backing this descriptor */
};

/* Information that needs to be kept for each board. */
struct mc32_local
{
	int slot;			/* MCA slot occupied by this board */

	u32 base;			/* Card-relative base of shared structures */
	volatile struct mc32_mailbox *rx_box;
	volatile struct mc32_mailbox *tx_box;
	volatile struct mc32_mailbox *exec_box;
	volatile struct mc32_stats *stats;	/* Start of on-card statistics */
	u16 tx_chain;			/* Transmit list start offset */
	u16 rx_chain;			/* Receive list start offset */
	u16 tx_len;			/* Transmit list count */
	u16 rx_len;			/* Receive list count */

	u16 xceiver_desired_state;	/* HALTED or RUNNING */
	u16 cmd_nonblocking;		/* Thread is uninterested in command result */
	u16 mc_reload_wait;		/* A multicast load request is pending */
	u32 mc_list_valid;		/* True when the mclist is set */

	struct mc32_ring_desc tx_ring[TX_RING_LEN];	/* Host Transmit ring */
	struct mc32_ring_desc rx_ring[RX_RING_LEN];	/* Host Receive ring */

	atomic_t tx_count;		/* buffers left */
	atomic_t tx_ring_head;		/* index to tx en-queue end */
	u16 tx_ring_tail;		/* index to tx de-queue end */

	u16 rx_ring_tail;		/* index to rx de-queue end */

	struct semaphore cmd_mutex;	/* Serialises issuing of execute commands */
	struct completion execution_cmd; /* Card has completed an execute command */
	struct completion xceiver_cmd;	 /* Card has completed a tx or rx command */
};
/* The station (ethernet) address prefix, used for a sanity check. */
#define SA_ADDR0 0x02
#define SA_ADDR1 0x60
#define SA_ADDR2 0xAC

/* MCA POS adapter id / display name pair. */
struct mca_adapters_t {
	unsigned int	id;	/* POS adapter id as stored by the BIOS */
	char		*name;	/* Human-readable adapter name */
};

/* Adapters we recognise; terminated by a zero id / NULL name entry. */
static const struct mca_adapters_t mc32_adapters[] = {
	{ 0x0041, "3COM EtherLink MC/32" },
	{ 0x8EF5, "IBM High Performance Lan Adapter" },
	{ 0x0000, NULL }
};
206 /* Macros for ring index manipulations */
207 static inline u16 next_rx(u16 rx) { return (rx+1)&(RX_RING_LEN-1); };
208 static inline u16 prev_rx(u16 rx) { return (rx-1)&(RX_RING_LEN-1); };
210 static inline u16 next_tx(u16 tx) { return (tx+1)&(TX_RING_LEN-1); };
/* Index to functions, as function prototypes. */
static int	mc32_probe1(struct net_device *dev, int ioaddr);
static int	mc32_command(struct net_device *dev, u16 cmd, void *data, int len);
static int	mc32_open(struct net_device *dev);
static void	mc32_timeout(struct net_device *dev);
static int	mc32_send_packet(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t mc32_interrupt(int irq, void *dev_id);
static int	mc32_close(struct net_device *dev);
static struct	net_device_stats *mc32_get_stats(struct net_device *dev);
static void	mc32_set_multicast_list(struct net_device *dev);
static void	mc32_reset_multicast_list(struct net_device *dev);
static const struct ethtool_ops netdev_ethtool_ops;
/*
 * cleanup_card - release resources claimed by mc32_probe1()
 * @dev: device being torn down
 *
 * Gives the MCA slot back, frees the IRQ and releases the I/O region.
 * Used on register_netdev() failure and at module unload.
 */
static void cleanup_card(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	unsigned slot = lp->slot;
	mca_mark_as_unused(slot);
	mca_set_adapter_name(slot, NULL);
	free_irq(dev->irq, dev);
	release_region(dev->base_addr, MC32_IO_EXTENT);
}
/**
 * mc32_probe	-	Search for supported boards
 * @unit: interface number to use
 *
 * Because MCA bus is a real bus and we can scan for cards we could do a
 * single scan for all boards here. Right now we use the passed in device
 * structure and scan for only one board. This needs fixing for modules
 * in particular.
 */

struct net_device *__init mc32_probe(int unit)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct mc32_local));
	static int current_mca_slot = -1;
	int i;
	int err;

	if (!dev)
		return ERR_PTR(-ENOMEM);

	if (unit >= 0)
		sprintf(dev->name, "eth%d", unit);

	/* Do not check any supplied i/o locations.
	   POS registers usually don't fail :) */

	/* MCA cards have POS registers.
	   Autodetecting MCA cards is extremely simple.
	   Just search for the card. */

	for(i = 0; (mc32_adapters[i].name != NULL); i++) {
		current_mca_slot =
			mca_find_unused_adapter(mc32_adapters[i].id, 0);

		if(current_mca_slot != MCA_NOTFOUND) {
			if(!mc32_probe1(dev, current_mca_slot))
			{
				/* Probe succeeded: claim the slot, then
				   register; on failure undo everything. */
				mca_set_adapter_name(current_mca_slot,
						     mc32_adapters[i].name);
				mca_mark_as_used(current_mca_slot);
				err = register_netdev(dev);
				if (err) {
					cleanup_card(dev);
					free_netdev(dev);
					dev = ERR_PTR(err);
				}
				return dev;
			}
		}
	}
	/* No supported adapter found in any unused slot */
	free_netdev(dev);
	return ERR_PTR(-ENODEV);
}
292 * mc32_probe1 - Check a given slot for a board and test the card
293 * @dev: Device structure to fill in
294 * @slot: The MCA bus slot being used by this card
296 * Decode the slot data and configure the card structures. Having done this we
297 * can reset the card and configure it. The card does a full self test cycle
298 * in firmware so we have to wait for it to return and post us either a
299 * failure case or some addresses we use to find the board internals.
302 static int __init mc32_probe1(struct net_device *dev, int slot)
304 static unsigned version_printed;
305 int i, err;
306 u8 POS;
307 u32 base;
308 struct mc32_local *lp = netdev_priv(dev);
309 static u16 mca_io_bases[]={
310 0x7280,0x7290,
311 0x7680,0x7690,
312 0x7A80,0x7A90,
313 0x7E80,0x7E90
315 static u32 mca_mem_bases[]={
316 0x00C0000,
317 0x00C4000,
318 0x00C8000,
319 0x00CC000,
320 0x00D0000,
321 0x00D4000,
322 0x00D8000,
323 0x00DC000
325 static char *failures[]={
326 "Processor instruction",
327 "Processor data bus",
328 "Processor data bus",
329 "Processor data bus",
330 "Adapter bus",
331 "ROM checksum",
332 "Base RAM",
333 "Extended RAM",
334 "82586 internal loopback",
335 "82586 initialisation failure",
336 "Adapter list configuration error"
339 /* Time to play MCA games */
341 if (mc32_debug && version_printed++ == 0)
342 printk(KERN_DEBUG "%s", version);
344 printk(KERN_INFO "%s: %s found in slot %d:", dev->name, cardname, slot);
346 POS = mca_read_stored_pos(slot, 2);
348 if(!(POS&1))
350 printk(" disabled.\n");
351 return -ENODEV;
354 /* Fill in the 'dev' fields. */
355 dev->base_addr = mca_io_bases[(POS>>1)&7];
356 dev->mem_start = mca_mem_bases[(POS>>4)&7];
358 POS = mca_read_stored_pos(slot, 4);
359 if(!(POS&1))
361 printk("memory window disabled.\n");
362 return -ENODEV;
365 POS = mca_read_stored_pos(slot, 5);
367 i=(POS>>4)&3;
368 if(i==3)
370 printk("invalid memory window.\n");
371 return -ENODEV;
374 i*=16384;
375 i+=16384;
377 dev->mem_end=dev->mem_start + i;
379 dev->irq = ((POS>>2)&3)+9;
381 if(!request_region(dev->base_addr, MC32_IO_EXTENT, cardname))
383 printk("io 0x%3lX, which is busy.\n", dev->base_addr);
384 return -EBUSY;
387 printk("io 0x%3lX irq %d mem 0x%lX (%dK)\n",
388 dev->base_addr, dev->irq, dev->mem_start, i/1024);
391 /* We ought to set the cache line size here.. */
395 * Go PROM browsing
398 /* Retrieve and print the ethernet address. */
399 for (i = 0; i < 6; i++)
401 mca_write_pos(slot, 6, i+12);
402 mca_write_pos(slot, 7, 0);
404 dev->dev_addr[i] = mca_read_pos(slot,3);
407 printk("%s: Address %pM", dev->name, dev->dev_addr);
409 mca_write_pos(slot, 6, 0);
410 mca_write_pos(slot, 7, 0);
412 POS = mca_read_stored_pos(slot, 4);
414 if(POS&2)
415 printk(" : BNC port selected.\n");
416 else
417 printk(" : AUI port selected.\n");
419 POS=inb(dev->base_addr+HOST_CTRL);
420 POS|=HOST_CTRL_ATTN|HOST_CTRL_RESET;
421 POS&=~HOST_CTRL_INTE;
422 outb(POS, dev->base_addr+HOST_CTRL);
423 /* Reset adapter */
424 udelay(100);
425 /* Reset off */
426 POS&=~(HOST_CTRL_ATTN|HOST_CTRL_RESET);
427 outb(POS, dev->base_addr+HOST_CTRL);
429 udelay(300);
432 * Grab the IRQ
435 err = request_irq(dev->irq, &mc32_interrupt, IRQF_SHARED | IRQF_SAMPLE_RANDOM, DRV_NAME, dev);
436 if (err) {
437 release_region(dev->base_addr, MC32_IO_EXTENT);
438 printk(KERN_ERR "%s: unable to get IRQ %d.\n", DRV_NAME, dev->irq);
439 goto err_exit_ports;
442 memset(lp, 0, sizeof(struct mc32_local));
443 lp->slot = slot;
445 i=0;
447 base = inb(dev->base_addr);
449 while(base == 0xFF)
451 i++;
452 if(i == 1000)
454 printk(KERN_ERR "%s: failed to boot adapter.\n", dev->name);
455 err = -ENODEV;
456 goto err_exit_irq;
458 udelay(1000);
459 if(inb(dev->base_addr+2)&(1<<5))
460 base = inb(dev->base_addr);
463 if(base>0)
465 if(base < 0x0C)
466 printk(KERN_ERR "%s: %s%s.\n", dev->name, failures[base-1],
467 base<0x0A?" test failure":"");
468 else
469 printk(KERN_ERR "%s: unknown failure %d.\n", dev->name, base);
470 err = -ENODEV;
471 goto err_exit_irq;
474 base=0;
475 for(i=0;i<4;i++)
477 int n=0;
479 while(!(inb(dev->base_addr+2)&(1<<5)))
481 n++;
482 udelay(50);
483 if(n>100)
485 printk(KERN_ERR "%s: mailbox read fail (%d).\n", dev->name, i);
486 err = -ENODEV;
487 goto err_exit_irq;
491 base|=(inb(dev->base_addr)<<(8*i));
494 lp->exec_box=isa_bus_to_virt(dev->mem_start+base);
496 base=lp->exec_box->data[1]<<16|lp->exec_box->data[0];
498 lp->base = dev->mem_start+base;
500 lp->rx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[2]);
501 lp->tx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[3]);
503 lp->stats = isa_bus_to_virt(lp->base + lp->exec_box->data[5]);
506 * Descriptor chains (card relative)
509 lp->tx_chain = lp->exec_box->data[8]; /* Transmit list start offset */
510 lp->rx_chain = lp->exec_box->data[10]; /* Receive list start offset */
511 lp->tx_len = lp->exec_box->data[9]; /* Transmit list count */
512 lp->rx_len = lp->exec_box->data[11]; /* Receive list count */
514 init_MUTEX_LOCKED(&lp->cmd_mutex);
515 init_completion(&lp->execution_cmd);
516 init_completion(&lp->xceiver_cmd);
518 printk("%s: Firmware Rev %d. %d RX buffers, %d TX buffers. Base of 0x%08X.\n",
519 dev->name, lp->exec_box->data[12], lp->rx_len, lp->tx_len, lp->base);
521 dev->open = mc32_open;
522 dev->stop = mc32_close;
523 dev->hard_start_xmit = mc32_send_packet;
524 dev->get_stats = mc32_get_stats;
525 dev->set_multicast_list = mc32_set_multicast_list;
526 dev->tx_timeout = mc32_timeout;
527 dev->watchdog_timeo = HZ*5; /* Board does all the work */
528 dev->ethtool_ops = &netdev_ethtool_ops;
530 return 0;
532 err_exit_irq:
533 free_irq(dev->irq, dev);
534 err_exit_ports:
535 release_region(dev->base_addr, MC32_IO_EXTENT);
536 return err;
/**
 *	mc32_ready_poll		-	wait until we can feed it a command
 *	@dev:	The device to wait for
 *
 *	Wait until the card becomes ready to accept a command via the
 *	command register. This tells us nothing about the completion
 *	status of any pending commands and takes very little time at all.
 */

static inline void mc32_ready_poll(struct net_device *dev)
{
	int ioaddr = dev->base_addr;
	/* Busy-wait, no timeout: the ready bit is expected promptly */
	while(!(inb(ioaddr+HOST_STATUS)&HOST_STATUS_CRR));
}
/**
 *	mc32_command_nowait	-	send a command non blocking
 *	@dev: The 3c527 to issue the command to
 *	@cmd: The command word to write to the mailbox
 *	@data: A data block if the command expects one
 *	@len: Length of the data block
 *
 *	Send a command from interrupt state. If there is a command
 *	currently being executed then we return an error of -1. It
 *	simply isn't viable to wait around as commands may be
 *	slow. This can theoretically be starved on SMP, but it's hard
 *	to see a realistic situation. We do not wait for the command
 *	to complete --- we rely on the interrupt handler to tidy up
 *	after us.
 */

static int mc32_command_nowait(struct net_device *dev, u16 cmd, void *data, int len)
{
	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;
	int ret = -1;

	/* Only proceed if no exec command is in flight; never block here */
	if (down_trylock(&lp->cmd_mutex) == 0)
	{
		lp->cmd_nonblocking=1;
		lp->exec_box->mbox=0;
		lp->exec_box->mbox=cmd;
		memcpy((void *)lp->exec_box->data, data, len);
		barrier();	/* the memcpy forgot the volatile so be sure */

		/* Send the command */
		mc32_ready_poll(dev);
		outb(1<<6, ioaddr+HOST_CMD);

		ret = 0;

		/* Interrupt handler will signal mutex on completion */
	}

	return ret;
}
/**
 *	mc32_command	-	send a command and sleep until completion
 *	@dev: The 3c527 card to issue the command to
 *	@cmd: The command word to write to the mailbox
 *	@data: A data block if the command expects one
 *	@len: Length of the data block
 *
 *	Sends exec commands in a user context. This permits us to wait around
 *	for the replies and also to wait for the command buffer to complete
 *	from a previous command before we execute our command. After our
 *	command completes we will attempt any pending multicast reload
 *	we blocked off by hogging the exec buffer.
 *
 *	You feed the card a command, you wait, it interrupts you get a
 *	reply. All well and good. The complication arises because you use
 *	commands for filter list changes which come in at bh level from things
 *	like IPV6 group stuff.
 *
 *	Returns 0 on success, -1 if the card flagged the command as failed.
 */

static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len)
{
	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;
	int ret = 0;

	down(&lp->cmd_mutex);

	/*
	 *	My Turn
	 */

	lp->cmd_nonblocking=0;
	lp->exec_box->mbox=0;
	lp->exec_box->mbox=cmd;
	memcpy((void *)lp->exec_box->data, data, len);
	barrier();	/* the memcpy forgot the volatile so be sure */

	mc32_ready_poll(dev);
	outb(1<<6, ioaddr+HOST_CMD);

	wait_for_completion(&lp->execution_cmd);

	/* Bit 13 in the returned mailbox word flags command failure */
	if(lp->exec_box->mbox&(1<<13))
		ret = -1;

	up(&lp->cmd_mutex);

	/*
	 *	A multicast set got blocked - try it now
	 */

	if(lp->mc_reload_wait)
	{
		mc32_reset_multicast_list(dev);
	}

	return ret;
}
/**
 *	mc32_start_transceiver	-	tell board to restart tx/rx
 *	@dev: The 3c527 card to issue the command to
 *
 *	This may be called from the interrupt state, where it is used
 *	to restart the rx ring if the card runs out of rx buffers.
 *
 *	We must first check if it's ok to (re)start the transceiver. See
 *	mc32_close for details.
 */

static void mc32_start_transceiver(struct net_device *dev) {

	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	/* Ignore RX overflow on device closure */
	if (lp->xceiver_desired_state==HALTED)
		return;

	/* Give the card the offset to the post-EOL-bit RX descriptor */
	mc32_ready_poll(dev);
	lp->rx_box->mbox=0;
	lp->rx_box->data[0]=lp->rx_ring[prev_rx(lp->rx_ring_tail)].p->next;
	outb(HOST_CMD_START_RX, ioaddr+HOST_CMD);

	mc32_ready_poll(dev);
	lp->tx_box->mbox=0;
	outb(HOST_CMD_RESTRT_TX, ioaddr+HOST_CMD);	/* card ignores this on RX restart */

	/* We are not interrupted on start completion */
}
/**
 *	mc32_halt_transceiver	-	tell board to stop tx/rx
 *	@dev: The 3c527 card to issue the command to
 *
 *	We issue the commands to halt the card's transceiver. In fact,
 *	after some experimenting we now simply tell the card to
 *	suspend. When issuing aborts occasionally odd things happened.
 *
 *	We then sleep until the card has notified us that both rx and
 *	tx have been suspended.
 */

static void mc32_halt_transceiver(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	/* Suspend RX, then sleep for the completion interrupt */
	mc32_ready_poll(dev);
	lp->rx_box->mbox=0;
	outb(HOST_CMD_SUSPND_RX, ioaddr+HOST_CMD);
	wait_for_completion(&lp->xceiver_cmd);

	/* Likewise for TX */
	mc32_ready_poll(dev);
	lp->tx_box->mbox=0;
	outb(HOST_CMD_SUSPND_TX, ioaddr+HOST_CMD);
	wait_for_completion(&lp->xceiver_cmd);
}
723 * mc32_load_rx_ring - load the ring of receive buffers
724 * @dev: 3c527 to build the ring for
726 * This initalises the on-card and driver datastructures to
727 * the point where mc32_start_transceiver() can be called.
729 * The card sets up the receive ring for us. We are required to use the
730 * ring it provides, although the size of the ring is configurable.
732 * We allocate an sk_buff for each ring entry in turn and
733 * initalise its house-keeping info. At the same time, we read
734 * each 'next' pointer in our rx_ring array. This reduces slow
735 * shared-memory reads and makes it easy to access predecessor
736 * descriptors.
738 * We then set the end-of-list bit for the last entry so that the
739 * card will know when it has run out of buffers.
742 static int mc32_load_rx_ring(struct net_device *dev)
744 struct mc32_local *lp = netdev_priv(dev);
745 int i;
746 u16 rx_base;
747 volatile struct skb_header *p;
749 rx_base=lp->rx_chain;
751 for(i=0; i<RX_RING_LEN; i++) {
752 lp->rx_ring[i].skb=alloc_skb(1532, GFP_KERNEL);
753 if (lp->rx_ring[i].skb==NULL) {
754 for (;i>=0;i--)
755 kfree_skb(lp->rx_ring[i].skb);
756 return -ENOBUFS;
758 skb_reserve(lp->rx_ring[i].skb, 18);
760 p=isa_bus_to_virt(lp->base+rx_base);
762 p->control=0;
763 p->data=isa_virt_to_bus(lp->rx_ring[i].skb->data);
764 p->status=0;
765 p->length=1532;
767 lp->rx_ring[i].p=p;
768 rx_base=p->next;
771 lp->rx_ring[i-1].p->control |= CONTROL_EOL;
773 lp->rx_ring_tail=0;
775 return 0;
780 * mc32_flush_rx_ring - free the ring of receive buffers
781 * @lp: Local data of 3c527 to flush the rx ring of
783 * Free the buffer for each ring slot. This may be called
784 * before mc32_load_rx_ring(), eg. on error in mc32_open().
785 * Requires rx skb pointers to point to a valid skb, or NULL.
788 static void mc32_flush_rx_ring(struct net_device *dev)
790 struct mc32_local *lp = netdev_priv(dev);
791 int i;
793 for(i=0; i < RX_RING_LEN; i++)
795 if (lp->rx_ring[i].skb) {
796 dev_kfree_skb(lp->rx_ring[i].skb);
797 lp->rx_ring[i].skb = NULL;
799 lp->rx_ring[i].p=NULL;
/**
 *	mc32_load_tx_ring	-	load transmit ring
 *	@dev: The 3c527 card to issue the command to
 *
 *	This sets up the host transmit data-structures.
 *
 *	First, we obtain from the card it's current position in the tx
 *	ring, so that we will know where to begin transmitting
 *	packets.
 *
 *	Then, we read the 'next' pointers from the on-card tx ring into
 *	our tx_ring array to reduce slow shared-mem reads. Finally, we
 *	initialise the tx house keeping variables.
 */

static void mc32_load_tx_ring(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	volatile struct skb_header *p;
	int i;
	u16 tx_base;

	/* Card tells us where it currently is in its tx ring */
	tx_base=lp->tx_box->data[0];

	for(i=0 ; i<TX_RING_LEN ; i++)
	{
		p=isa_bus_to_virt(lp->base+tx_base);
		lp->tx_ring[i].p=p;
		lp->tx_ring[i].skb=NULL;

		tx_base=p->next;
	}

	/* -1 so that tx_ring_head cannot "lap" tx_ring_tail */
	/* see mc32_tx_ring */

	atomic_set(&lp->tx_count, TX_RING_LEN-1);
	atomic_set(&lp->tx_ring_head, 0);
	lp->tx_ring_tail=0;
}
848 * mc32_flush_tx_ring - free transmit ring
849 * @lp: Local data of 3c527 to flush the tx ring of
851 * If the ring is non-empty, zip over the it, freeing any
852 * allocated skb_buffs. The tx ring house-keeping variables are
853 * then reset. Requires rx skb pointers to point to a valid skb,
854 * or NULL.
857 static void mc32_flush_tx_ring(struct net_device *dev)
859 struct mc32_local *lp = netdev_priv(dev);
860 int i;
862 for (i=0; i < TX_RING_LEN; i++)
864 if (lp->tx_ring[i].skb)
866 dev_kfree_skb(lp->tx_ring[i].skb);
867 lp->tx_ring[i].skb = NULL;
871 atomic_set(&lp->tx_count, 0);
872 atomic_set(&lp->tx_ring_head, 0);
873 lp->tx_ring_tail=0;
/**
 *	mc32_open	-	handle 'up' of card
 *	@dev: device to open
 *
 *	The user is trying to bring the card into ready state. This requires
 *	a brief dialogue with the card. Firstly we enable interrupts and then
 *	'indications'. Without these enabled the card doesn't bother telling
 *	us what it has done. This had me puzzled for a week.
 *
 *	We configure the number of card descriptors, then load the network
 *	address and multicast filters. Turn on the workaround mode. This
 *	works around a bug in the 82586 - it asks the firmware to do
 *	so. It has a performance (latency) hit but is needed on busy
 *	[read most] lans. We load the ring with buffers then we kick it
 *	all off.
 */

static int mc32_open(struct net_device *dev)
{
	int ioaddr = dev->base_addr;
	struct mc32_local *lp = netdev_priv(dev);
	u8 one=1;
	u8 regs;
	u16 descnumbuffs[2] = {TX_RING_LEN, RX_RING_LEN};

	/*
	 *	Interrupts enabled
	 */

	regs=inb(ioaddr+HOST_CTRL);
	regs|=HOST_CTRL_INTE;
	outb(regs, ioaddr+HOST_CTRL);

	/*
	 *	Allow ourselves to issue commands
	 *	(cmd_mutex was initialised locked in mc32_probe1)
	 */

	up(&lp->cmd_mutex);


	/*
	 *	Send the indications on command
	 */

	mc32_command(dev, 4, &one, 2);

	/*
	 *	Poke it to make sure it's really dead.
	 */

	mc32_halt_transceiver(dev);
	mc32_flush_tx_ring(dev);

	/*
	 *	Ask card to set up on-card descriptors to our spec
	 */

	if(mc32_command(dev, 8, descnumbuffs, 4)) {
		printk("%s: %s rejected our buffer configuration!\n",
		       dev->name, cardname);
		mc32_close(dev);
		return -ENOBUFS;
	}

	/* Report new configuration */
	mc32_command(dev, 6, NULL, 0);

	/* Re-read the (possibly adjusted) descriptor chain layout */
	lp->tx_chain	= lp->exec_box->data[8];   /* Transmit list start offset */
	lp->rx_chain	= lp->exec_box->data[10];  /* Receive list start offset */
	lp->tx_len	= lp->exec_box->data[9];   /* Transmit list count */
	lp->rx_len	= lp->exec_box->data[11];  /* Receive list count */

	/* Set Network Address */
	mc32_command(dev, 1, dev->dev_addr, 6);

	/* Set the filters */
	mc32_set_multicast_list(dev);

	if (WORKAROUND_82586) {
		u16 zero_word=0;
		mc32_command(dev, 0x0D, &zero_word, 2);	/* 82586 bug workaround on */
	}

	mc32_load_tx_ring(dev);

	if(mc32_load_rx_ring(dev))
	{
		mc32_close(dev);
		return -ENOBUFS;
	}

	lp->xceiver_desired_state = RUNNING;

	/* And finally, set the ball rolling... */
	mc32_start_transceiver(dev);

	netif_start_queue(dev);

	return 0;
}
/**
 *	mc32_timeout	-	handle a timeout from the network layer
 *	@dev: 3c527 that timed out
 *
 *	Handle a timeout on transmit from the 3c527. This normally means
 *	bad things as the hardware handles cable timeouts and mess for
 *	us.
 */

static void mc32_timeout(struct net_device *dev)
{
	printk(KERN_WARNING "%s: transmit timed out?\n", dev->name);
	/* Try to restart the adaptor. */
	netif_wake_queue(dev);
}
/**
 *	mc32_send_packet	-	queue a frame for transmit
 *	@skb: buffer to transmit
 *	@dev: 3c527 to send it out of
 *
 *	Transmit a buffer. This normally means throwing the buffer onto
 *	the transmit queue as the queue is quite large. If the queue is
 *	full then we set tx_busy and return. Once the interrupt handler
 *	gets messages telling it to reclaim transmit queue entries, we will
 *	clear tx_busy and the kernel will start calling this again.
 *
 *	We do not disable interrupts or acquire any locks; this can
 *	run concurrently with mc32_tx_ring(), and the function itself
 *	is serialised at a higher layer. However, similarly for the
 *	card itself, we must ensure that we update tx_ring_head only
 *	after we've established a valid packet on the tx ring (and
 *	before we let the card "see" it, to prevent it racing with the
 *	irq handler).
 */

static int mc32_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	u32 head = atomic_read(&lp->tx_ring_head);

	volatile struct skb_header *p, *np;

	netif_stop_queue(dev);

	/* Ring full: ask to be called again once space is reclaimed */
	if(atomic_read(&lp->tx_count)==0) {
		return 1;
	}

	/* Pad short frames; skb is freed on padding failure */
	if (skb_padto(skb, ETH_ZLEN)) {
		netif_wake_queue(dev);
		return 0;
	}

	atomic_dec(&lp->tx_count);

	/* P is the last sending/sent buffer as a pointer */
	p=lp->tx_ring[head].p;

	head = next_tx(head);

	/* NP is the buffer we will be loading */
	np=lp->tx_ring[head].p;

	/* We will need this to flush the buffer out */
	lp->tx_ring[head].skb=skb;

	np->length	= unlikely(skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
	np->data	= isa_virt_to_bus(skb->data);
	np->status	= 0;
	np->control	= CONTROL_EOP | CONTROL_EOL;
	wmb();		/* descriptor must be complete before it is exposed */

	/*
	 * The new frame has been setup; we can now
	 * let the interrupt handler and card "see" it
	 */

	atomic_set(&lp->tx_ring_head, head);
	p->control	&= ~CONTROL_EOL;	/* clearing EOL hands np to the card */

	netif_wake_queue(dev);
	return 0;
}
/**
 *	mc32_update_stats	-	pull off the on board statistics
 *	@dev: 3c527 to service
 *
 *	Query and reset the on-card stats. There's the small possibility
 *	of a race here, which would result in an underestimation of
 *	actual errors. As such, we'd prefer to keep all our stats
 *	collection in software. As a rule, we do. However it can't be
 *	used for rx errors and collisions as, by default, the card discards
 *	bad rx packets.
 *
 *	Setting the SAV BP in the rx filter command supposedly
 *	stops this behaviour. However, testing shows that it only seems to
 *	enable the collation of on-card rx statistics --- the driver
 *	never sees an RX descriptor with an error status set.
 */

static void mc32_update_stats(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	volatile struct mc32_stats *st = lp->stats;

	u32 rx_errors=0;

	/* Accumulate each on-card counter into the SW stats, then zero
	   the HW counter (the read-then-clear race noted above). */
	rx_errors+=dev->stats.rx_crc_errors   +=st->rx_crc_errors;
	st->rx_crc_errors=0;
	rx_errors+=dev->stats.rx_fifo_errors  +=st->rx_overrun_errors;
	st->rx_overrun_errors=0;
	rx_errors+=dev->stats.rx_frame_errors +=st->rx_alignment_errors;
	st->rx_alignment_errors=0;
	rx_errors+=dev->stats.rx_length_errors+=st->rx_tooshort_errors;
	st->rx_tooshort_errors=0;
	rx_errors+=dev->stats.rx_missed_errors+=st->rx_outofresource_errors;
	st->rx_outofresource_errors=0;
	dev->stats.rx_errors=rx_errors;

	/* Number of packets which saw one collision */
	dev->stats.collisions+=st->dataC[10];
	st->dataC[10]=0;

	/* Number of packets which saw 2--15 collisions */
	dev->stats.collisions+=st->dataC[11];
	st->dataC[11]=0;
}
/**
 *	mc32_rx_ring	-	process the receive ring
 *	@dev: 3c527 that needs its receive ring processing
 *
 *	We have received one or more indications from the card that a
 *	receive has completed. The buffer ring thus contains dirty
 *	entries. We walk the ring by iterating over the circular rx_ring
 *	array, starting at the next dirty buffer (which happens to be the
 *	one we finished up at last time around).
 *
 *	For each completed packet, we will either copy it and pass it up
 *	the stack or, if the packet is near MTU sized, we allocate
 *	another buffer and flip the old one up the stack.
 *
 *	We must succeed in keeping a buffer on the ring. If necessary we
 *	will toss a received packet rather than lose a ring entry. Once
 *	the first uncompleted descriptor is found, we move the
 *	End-Of-List bit to include the buffers just processed.
 */

static void mc32_rx_ring(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	volatile struct skb_header *p;
	u16 rx_ring_tail;
	u16 rx_old_tail;
	int x=0;

	rx_old_tail = rx_ring_tail = lp->rx_ring_tail;

	/* Bounded walk (48 iterations max) so an interrupt storm
	   cannot pin us in this loop forever */
	do
	{
		p=lp->rx_ring[rx_ring_tail].p;

		if(!(p->status & (1<<7))) {	/* Not COMPLETED */
			break;
		}
		if(p->status & (1<<6))		/* COMPLETED_OK */
		{

			u16 length=p->length;
			struct sk_buff *skb;
			struct sk_buff *newskb;

			/* Try to save time by avoiding a copy on big frames */

			if ((length > RX_COPYBREAK)
			    && ((newskb=dev_alloc_skb(1532)) != NULL))
			{
				/* Flip the filled buffer up the stack and
				   put the fresh one on the ring in its place */
				skb=lp->rx_ring[rx_ring_tail].skb;
				skb_put(skb, length);

				skb_reserve(newskb,18);
				lp->rx_ring[rx_ring_tail].skb=newskb;
				p->data=isa_virt_to_bus(newskb->data);
			}
			else
			{
				/* Small frame (or no memory): copy out,
				   keep the ring buffer in place */
				skb=dev_alloc_skb(length+2);

				if(skb==NULL) {
					dev->stats.rx_dropped++;
					goto dropped;
				}

				skb_reserve(skb,2);
				memcpy(skb_put(skb, length),
				       lp->rx_ring[rx_ring_tail].skb->data, length);
			}

			skb->protocol=eth_type_trans(skb,dev);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += length;
			netif_rx(skb);
		}

	dropped:
		/* Re-arm the descriptor for the card */
		p->length = 1532;
		p->status = 0;

		rx_ring_tail=next_rx(rx_ring_tail);
	}
	while(x++<48);

	/* If there was actually a frame to be processed, place the EOL bit */
	/* at the descriptor prior to the one to be filled next */

	if (rx_ring_tail != rx_old_tail)
	{
		lp->rx_ring[prev_rx(rx_ring_tail)].p->control |=  CONTROL_EOL;
		lp->rx_ring[prev_rx(rx_old_tail)].p->control  &= ~CONTROL_EOL;
	}

	lp->rx_ring_tail=rx_ring_tail;
}
1216 * mc32_tx_ring - process completed transmits
1217 * @dev: 3c527 that needs its transmit ring processing
1220 * This operates in a similar fashion to mc32_rx_ring. We iterate
1221 * over the transmit ring. For each descriptor which has been
1222 * processed by the card, we free its associated buffer and note
1223 * any errors. This continues until the transmit ring is emptied
1224 * or we reach a descriptor that hasn't yet been processed by the
1225 * card.
static void mc32_tx_ring(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	volatile struct skb_header *np;	/* descriptor shared with the card */

	/*
	 * We rely on head==tail to mean 'queue empty'.
	 * This is why lp->tx_count=TX_RING_LEN-1: in order to prevent
	 * tx_ring_head wrapping to tail and confusing a 'queue empty'
	 * condition with 'queue full'
	 */

	while (lp->tx_ring_tail != atomic_read(&lp->tx_ring_head))
	{
		u16 t;

		t=next_tx(lp->tx_ring_tail);
		np=lp->tx_ring[t].p;

		if(!(np->status & (1<<7)))
		{
			/* Not COMPLETED */
			break;
		}
		dev->stats.tx_packets++;
		if(!(np->status & (1<<6))) /* Not COMPLETED_OK */
		{
			dev->stats.tx_errors++;

			/* Low nibble of status carries the failure cause. */
			switch(np->status&0x0F)
			{
				case 1:
					dev->stats.tx_aborted_errors++;
					break; /* Max collisions */
				case 2:
					dev->stats.tx_fifo_errors++;
					break;
				case 3:
					dev->stats.tx_carrier_errors++;
					break;
				case 4:
					dev->stats.tx_window_errors++;
					break; /* CTS Lost */
				case 5:
					dev->stats.tx_aborted_errors++;
					break; /* Transmit timeout */
			}
		}
		/* Packets are sent in order - this is
		   basically a FIFO queue of buffers matching
		   the card ring */
		dev->stats.tx_bytes+=lp->tx_ring[t].skb->len;
		dev_kfree_skb_irq(lp->tx_ring[t].skb);
		lp->tx_ring[t].skb=NULL;
		atomic_inc(&lp->tx_count);
		netif_wake_queue(dev);

		lp->tx_ring_tail=t;
	}
}
1293 * mc32_interrupt - handle an interrupt from a 3c527
 * @irq: Interrupt number
 * @dev_id: 3c527 that requires servicing
1299 * An interrupt is raised whenever the 3c527 writes to the command
1300 * register. This register contains the message it wishes to send us
1301 * packed into a single byte field. We keep reading status entries
1302 * until we have processed all the control items, but simply count
1303 * transmit and receive reports. When all reports are in we empty the
1304 * transceiver rings as appropriate. This saves the overhead of
1305 * multiple command requests.
1307 * Because MCA is level-triggered, we shouldn't miss indications.
1308 * Therefore, we needn't ask the card to suspend interrupts within
1309 * this handler. The card receives an implicit acknowledgment of the
1310 * current interrupt when we read the command register.
static irqreturn_t mc32_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct mc32_local *lp;
	int ioaddr, status, boguscount = 0;	/* boguscount guards against a stuck card */
	int rx_event = 0;
	int tx_event = 0;

	ioaddr = dev->base_addr;
	lp = netdev_priv(dev);

	/* See what's cooking */

	while((inb(ioaddr+HOST_STATUS)&HOST_STATUS_CWR) && boguscount++<2000)
	{
		/* Reading the command register also acknowledges the
		   current interrupt (see the function comment above). */
		status=inb(ioaddr+HOST_CMD);

#ifdef DEBUG_IRQ
		printk("Status TX%d RX%d EX%d OV%d BC%d\n",
			(status&7), (status>>3)&7, (status>>6)&1,
			(status>>7)&1, boguscount);
#endif

		/* Bits 0-2: transmit indication */
		switch(status&7)
		{
			case 0:
				break;
			case 6: /* TX fail */
			case 2: /* TX ok */
				tx_event = 1;
				break;
			case 3: /* Halt */
			case 4: /* Abort */
				complete(&lp->xceiver_cmd);
				break;
			default:
				printk("%s: strange tx ack %d\n", dev->name, status&7);
		}
		status>>=3;
		/* Bits 3-5: receive indication */
		switch(status&7)
		{
			case 0:
				break;
			case 2: /* RX */
				rx_event=1;
				break;
			case 3: /* Halt */
			case 4: /* Abort */
				complete(&lp->xceiver_cmd);
				break;
			case 6:
				/* Out of RX buffers stat */
				/* Must restart rx */
				dev->stats.rx_dropped++;
				mc32_rx_ring(dev);
				mc32_start_transceiver(dev);
				break;
			default:
				printk("%s: strange rx ack %d\n",
					dev->name, status&7);
		}
		status>>=3;
		/* Bit 6: command execution completed */
		if(status&1)
		{
			/*
			 * No thread is waiting: we need to tidy
			 * up ourself.
			 */

			if (lp->cmd_nonblocking) {
				up(&lp->cmd_mutex);
				if (lp->mc_reload_wait)
					mc32_reset_multicast_list(dev);
			}
			else complete(&lp->execution_cmd);
		}
		/* Bit 7: an on-card statistics counter is near overflow */
		if(status&2)
		{
			/*
			 * We get interrupted once per
			 * counter that is about to overflow.
			 */

			mc32_update_stats(dev);
		}
	}

	/*
	 *	Process the transmit and receive rings
	 */

	if(tx_event)
		mc32_tx_ring(dev);

	if(rx_event)
		mc32_rx_ring(dev);

	return IRQ_HANDLED;
}
1417 * mc32_close - user configuring the 3c527 down
1418 * @dev: 3c527 card to shut down
1420 * The 3c527 is a bus mastering device. We must be careful how we
1421 * shut it down. It may also be running shared interrupt so we have
1422 * to be sure to silence it properly
1424 * We indicate that the card is closing to the rest of the
1425 * driver. Otherwise, it is possible that the card may run out
1426 * of receive buffers and restart the transceiver while we're
1427 * trying to close it.
1429 * We abort any receive and transmits going on and then wait until
1430 * any pending exec commands have completed in other code threads.
1431 * In theory we can't get here while that is true, in practice I am
1432 * paranoid
1434 * We turn off the interrupt enable for the board to be sure it can't
 * interfere with other devices.
static int mc32_close(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	u8 regs;
	u16 one=1;

	/* Tell the rest of the driver we are closing (see the function
	   comment: prevents a transceiver restart racing the shutdown). */
	lp->xceiver_desired_state = HALTED;
	netif_stop_queue(dev);

	/*
	 *	Send the indications on command (handy debug check)
	 */

	mc32_command(dev, 4, &one, 2);

	/* Shut down the transceiver */

	mc32_halt_transceiver(dev);

	/* Ensure we issue no more commands beyond this point */

	down(&lp->cmd_mutex);

	/* Ok the card is now stopping */

	/* Mask the board's interrupt enable bit */
	regs=inb(ioaddr+HOST_CTRL);
	regs&=~HOST_CTRL_INTE;
	outb(regs, ioaddr+HOST_CTRL);

	/* Release all ring buffers and collect final statistics */
	mc32_flush_rx_ring(dev);
	mc32_flush_tx_ring(dev);

	mc32_update_stats(dev);

	return 0;
}
1479 * mc32_get_stats - hand back stats to network layer
1480 * @dev: The 3c527 card to handle
1482 * We've collected all the stats we can in software already. Now
1483 * it's time to update those kept on-card and return the lot.
1487 static struct net_device_stats *mc32_get_stats(struct net_device *dev)
1489 mc32_update_stats(dev);
1490 return &dev->stats;
1495 * do_mc32_set_multicast_list - attempt to update multicasts
1496 * @dev: 3c527 device to load the list on
1497 * @retry: indicates this is not the first call.
1500 * Actually set or clear the multicast filter for this adaptor. The
1501 * locking issues are handled by this routine. We have to track
1502 * state as it may take multiple calls to get the command sequence
1503 * completed. We just keep trying to schedule the loads until we
1504 * manage to process them all.
1506 * num_addrs == -1 Promiscuous mode, receive all packets
1508 * num_addrs == 0 Normal mode, clear multicast list
1510 * num_addrs > 0 Multicast mode, receive normal and MC packets,
1511 * and do best-effort filtering.
1513 * See mc32_update_stats() regards setting the SAV BP bit.
static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
{
	struct mc32_local *lp = netdev_priv(dev);
	u16 filt = (1<<2); /* Save Bad Packets, for stats purposes */

	if ((dev->flags&IFF_PROMISC) ||
	    (dev->flags&IFF_ALLMULTI) ||
	    dev->mc_count > 10)
		/* Enable promiscuous mode */
		filt |= 1;
	else if(dev->mc_count)
	{
		/* Command 2 block: count byte, pad byte, then the raw
		   6-byte addresses (max 10 -> 2+60 = 62 bytes). */
		unsigned char block[62];
		unsigned char *bp;
		struct dev_mc_list *dmc=dev->mc_list;

		int i;

		/* A fresh request (retry==0) invalidates any list already
		   loaded; a retry keeps what the card has accepted. */
		if(retry==0)
			lp->mc_list_valid = 0;
		if(!lp->mc_list_valid)
		{
			block[1]=0;
			block[0]=dev->mc_count;
			bp=block+2;

			for(i=0;i<dev->mc_count;i++)
			{
				memcpy(bp, dmc->dmi_addr, 6);
				bp+=6;
				dmc=dmc->next;
			}
			if(mc32_command_nowait(dev, 2, block, 2+6*dev->mc_count)==-1)
			{
				/* Card busy: reschedule; the IRQ handler will
				   call mc32_reset_multicast_list() later. */
				lp->mc_reload_wait = 1;
				return;
			}
			lp->mc_list_valid=1;
		}
	}

	/* Command 0 sets the receive filter mode */
	if(mc32_command_nowait(dev, 0, &filt, 2)==-1)
	{
		lp->mc_reload_wait = 1;
	}
	else {
		lp->mc_reload_wait = 0;
	}
}
1569 * mc32_set_multicast_list - queue multicast list update
1570 * @dev: The 3c527 to use
1572 * Commence loading the multicast list. This is called when the kernel
1573 * changes the lists. It will override any pending list we are trying to
1574 * load.
static void mc32_set_multicast_list(struct net_device *dev)
{
	/* First attempt: retry=0 invalidates any previously cached list. */
	do_mc32_set_multicast_list(dev,0);
}
1584 * mc32_reset_multicast_list - reset multicast list
1585 * @dev: The 3c527 to use
1587 * Attempt the next step in loading the multicast lists. If this attempt
1588 * fails to complete then it will be scheduled and this function called
1589 * again later from elsewhere.
static void mc32_reset_multicast_list(struct net_device *dev)
{
	/* Retry path: retry=1 keeps any list the card already accepted. */
	do_mc32_set_multicast_list(dev,1);
}
1597 static void netdev_get_drvinfo(struct net_device *dev,
1598 struct ethtool_drvinfo *info)
1600 strcpy(info->driver, DRV_NAME);
1601 strcpy(info->version, DRV_VERSION);
1602 sprintf(info->bus_info, "MCA 0x%lx", dev->base_addr);
static u32 netdev_get_msglevel(struct net_device *dev)
{
	/* Report the driver-wide debug level (shared across all cards). */
	return mc32_debug;
}
static void netdev_set_msglevel(struct net_device *dev, u32 level)
{
	/* Set the driver-wide debug level (shared across all cards). */
	mc32_debug = level;
}
/* Minimal ethtool support: driver identification and message level only. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
};
1621 #ifdef MODULE
1623 static struct net_device *this_device;
1626 * init_module - entry point
1628 * Probe and locate a 3c527 card. This really should probe and locate
1629 * all the 3c527 cards in the machine not just one of them. Yes you can
1630 * insmod multiple modules for now but it's a hack.
1633 int __init init_module(void)
1635 this_device = mc32_probe(-1);
1636 if (IS_ERR(this_device))
1637 return PTR_ERR(this_device);
1638 return 0;
1642 * cleanup_module - free resources for an unload
1644 * Unloading time. We release the MCA bus resources and the interrupt
1645 * at which point everything is ready to unload. The card must be stopped
1646 * at this point or we would not have been called. When we unload we
1647 * leave the card stopped but not totally shut down. When the card is
1648 * initialized it must be rebooted or the rings reloaded before any
1649 * transmit operations are allowed to start scribbling into memory.
1652 void __exit cleanup_module(void)
1654 unregister_netdev(this_device);
1655 cleanup_card(this_device);
1656 free_netdev(this_device);
1659 #endif /* MODULE */