/* drivers/net/3c527.c */
/* 3c527.c: 3Com Etherlink/MC32 driver for Linux 2.4 and 2.6.
 *
 * (c) Copyright 1998 Red Hat Software Inc
 * Written by Alan Cox.
 * Further debugging by Carl Drougge.
 * Initial SMP support by Felipe W Damasio <felipewd@terra.com.br>
 * Heavily modified by Richard Procter <rnp@paradise.net.nz>
 *
 * Based on skeleton.c written 1993-94 by Donald Becker and ne2.c
 * (for the MCA stuff) written by Wim Dumon.
 *
 * Thanks to 3Com for making this possible by providing me with the
 * documentation.
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */
/* Driver identification strings, used in the boot banner and by ethtool. */
#define DRV_NAME		"3c527"
#define DRV_VERSION		"0.7-SMP"
#define DRV_RELDATE		"2003/09/21"

static const char *version =
DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Richard Procter <rnp@paradise.net.nz>\n";
/**
 * DOC: Traps for the unwary
 *
 * The diagram (Figure 1-1) and the POS summary disagree with the
 * "Interrupt Level" section in the manual.
 *
 * The manual contradicts itself when describing the minimum number
 * of buffers in the 'configure lists' command.
 * My card accepts a buffer config of 4/4.
 *
 * Setting the SAV BP bit does not save bad packets, but
 * only enables RX on-card stats collection.
 *
 * The documentation in places seems to miss things. In actual fact
 * I've always eventually found everything is documented, it just
 * requires careful study.
 *
 * DOC: Theory Of Operation
 *
 * The 3com 3c527 is a 32bit MCA bus mastering adapter with a large
 * amount of on board intelligence that housekeeps a somewhat dumber
 * Intel NIC. For performance we want to keep the transmit queue deep
 * as the card can transmit packets while fetching others from main
 * memory by bus master DMA. Transmission and reception are driven by
 * circular buffer queues.
 *
 * The mailboxes can be used for controlling how the card traverses
 * its buffer rings, but are used only for initial setup in this
 * implementation. The exec mailbox allows a variety of commands to
 * be executed. Each command must complete before the next is
 * executed. Primarily we use the exec mailbox for controlling the
 * multicast lists. We have to do a certain amount of interesting
 * hoop jumping as the multicast list changes can occur in interrupt
 * state when the card has an exec command pending. We defer such
 * events until the command completion interrupt.
 *
 * A copy break scheme (taken from 3c59x.c) is employed whereby
 * received frames exceeding a configurable length are passed
 * directly to the higher networking layers without incurring a copy,
 * in what amounts to a time/space trade-off.
 *
 * The card also keeps a large amount of statistical information
 * on-board. In a perfect world, these could be used safely at no
 * cost. However, lacking information to the contrary, processing
 * them without races would involve so much extra complexity as to
 * make it unworthwhile to do so. In the end, a hybrid SW/HW
 * implementation was made necessary --- see mc32_update_stats().
 *
 * DOC: Notes
 *
 * It should be possible to use two or more cards, but at this stage
 * only by loading two copies of the same module.
 *
 * The on-board 82586 NIC has trouble receiving multiple
 * back-to-back frames and so is likely to drop packets from fast
 * senders.
 **/
85 #include <linux/module.h>
87 #include <linux/errno.h>
88 #include <linux/netdevice.h>
89 #include <linux/etherdevice.h>
90 #include <linux/if_ether.h>
91 #include <linux/init.h>
92 #include <linux/kernel.h>
93 #include <linux/types.h>
94 #include <linux/fcntl.h>
95 #include <linux/interrupt.h>
96 #include <linux/mca-legacy.h>
97 #include <linux/ioport.h>
98 #include <linux/in.h>
99 #include <linux/skbuff.h>
100 #include <linux/slab.h>
101 #include <linux/string.h>
102 #include <linux/wait.h>
103 #include <linux/ethtool.h>
104 #include <linux/completion.h>
105 #include <linux/bitops.h>
106 #include <linux/semaphore.h>
108 #include <asm/uaccess.h>
109 #include <asm/system.h>
110 #include <asm/io.h>
111 #include <asm/dma.h>
113 #include "3c527.h"
115 MODULE_LICENSE("GPL");
118 * The name of the card. Is used for messages and in the requests for
119 * io regions, irqs and dma channels
121 static const char* cardname = DRV_NAME;
123 /* use 0 for production, 1 for verification, >2 for debug */
124 #ifndef NET_DEBUG
125 #define NET_DEBUG 2
126 #endif
128 #undef DEBUG_IRQ
130 static unsigned int mc32_debug = NET_DEBUG;
132 /* The number of low I/O ports used by the ethercard. */
133 #define MC32_IO_EXTENT 8
135 /* As implemented, values must be a power-of-2 -- 4/8/16/32 */
136 #define TX_RING_LEN 32 /* Typically the card supports 37 */
137 #define RX_RING_LEN 8 /* " " " */
139 /* Copy break point, see above for details.
140 * Setting to > 1512 effectively disables this feature. */
141 #define RX_COPYBREAK 200 /* Value from 3c59x.c */
143 /* Issue the 82586 workaround command - this is for "busy lans", but
144 * basically means for all lans now days - has a performance (latency)
145 * cost, but best set. */
146 static const int WORKAROUND_82586=1;
/* Pointers to buffers and their on-card records.
 * 'p' aliases card shared memory, hence volatile. */
struct mc32_ring_desc
{
	volatile struct skb_header *p;	/* on-card descriptor */
	struct sk_buff *skb;		/* host buffer backing it */
};
155 /* Information that needs to be kept for each board. */
156 struct mc32_local
158 int slot;
160 u32 base;
161 volatile struct mc32_mailbox *rx_box;
162 volatile struct mc32_mailbox *tx_box;
163 volatile struct mc32_mailbox *exec_box;
164 volatile struct mc32_stats *stats; /* Start of on-card statistics */
165 u16 tx_chain; /* Transmit list start offset */
166 u16 rx_chain; /* Receive list start offset */
167 u16 tx_len; /* Transmit list count */
168 u16 rx_len; /* Receive list count */
170 u16 xceiver_desired_state; /* HALTED or RUNNING */
171 u16 cmd_nonblocking; /* Thread is uninterested in command result */
172 u16 mc_reload_wait; /* A multicast load request is pending */
173 u32 mc_list_valid; /* True when the mclist is set */
175 struct mc32_ring_desc tx_ring[TX_RING_LEN]; /* Host Transmit ring */
176 struct mc32_ring_desc rx_ring[RX_RING_LEN]; /* Host Receive ring */
178 atomic_t tx_count; /* buffers left */
179 atomic_t tx_ring_head; /* index to tx en-queue end */
180 u16 tx_ring_tail; /* index to tx de-queue end */
182 u16 rx_ring_tail; /* index to rx de-queue end */
184 struct semaphore cmd_mutex; /* Serialises issuing of execute commands */
185 struct completion execution_cmd; /* Card has completed an execute command */
186 struct completion xceiver_cmd; /* Card has completed a tx or rx command */
/* The station (ethernet) address prefix, used for a sanity check. */
#define SA_ADDR0 0x02
#define SA_ADDR1 0x60
#define SA_ADDR2 0xAC
194 struct mca_adapters_t {
195 unsigned int id;
196 char *name;
199 static const struct mca_adapters_t mc32_adapters[] = {
200 { 0x0041, "3COM EtherLink MC/32" },
201 { 0x8EF5, "IBM High Performance Lan Adapter" },
202 { 0x0000, NULL }
206 /* Macros for ring index manipulations */
207 static inline u16 next_rx(u16 rx) { return (rx+1)&(RX_RING_LEN-1); };
208 static inline u16 prev_rx(u16 rx) { return (rx-1)&(RX_RING_LEN-1); };
210 static inline u16 next_tx(u16 tx) { return (tx+1)&(TX_RING_LEN-1); };
213 /* Index to functions, as function prototypes. */
214 static int mc32_probe1(struct net_device *dev, int ioaddr);
215 static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len);
216 static int mc32_open(struct net_device *dev);
217 static void mc32_timeout(struct net_device *dev);
218 static int mc32_send_packet(struct sk_buff *skb, struct net_device *dev);
219 static irqreturn_t mc32_interrupt(int irq, void *dev_id);
220 static int mc32_close(struct net_device *dev);
221 static struct net_device_stats *mc32_get_stats(struct net_device *dev);
222 static void mc32_set_multicast_list(struct net_device *dev);
223 static void mc32_reset_multicast_list(struct net_device *dev);
224 static const struct ethtool_ops netdev_ethtool_ops;
226 static void cleanup_card(struct net_device *dev)
228 struct mc32_local *lp = netdev_priv(dev);
229 unsigned slot = lp->slot;
230 mca_mark_as_unused(slot);
231 mca_set_adapter_name(slot, NULL);
232 free_irq(dev->irq, dev);
233 release_region(dev->base_addr, MC32_IO_EXTENT);
237 * mc32_probe - Search for supported boards
238 * @unit: interface number to use
240 * Because MCA bus is a real bus and we can scan for cards we could do a
241 * single scan for all boards here. Right now we use the passed in device
242 * structure and scan for only one board. This needs fixing for modules
243 * in particular.
246 struct net_device *__init mc32_probe(int unit)
248 struct net_device *dev = alloc_etherdev(sizeof(struct mc32_local));
249 static int current_mca_slot = -1;
250 int i;
251 int err;
253 if (!dev)
254 return ERR_PTR(-ENOMEM);
256 if (unit >= 0)
257 sprintf(dev->name, "eth%d", unit);
259 /* Do not check any supplied i/o locations.
260 POS registers usually don't fail :) */
262 /* MCA cards have POS registers.
263 Autodetecting MCA cards is extremely simple.
264 Just search for the card. */
266 for(i = 0; (mc32_adapters[i].name != NULL); i++) {
267 current_mca_slot =
268 mca_find_unused_adapter(mc32_adapters[i].id, 0);
270 if(current_mca_slot != MCA_NOTFOUND) {
271 if(!mc32_probe1(dev, current_mca_slot))
273 mca_set_adapter_name(current_mca_slot,
274 mc32_adapters[i].name);
275 mca_mark_as_used(current_mca_slot);
276 err = register_netdev(dev);
277 if (err) {
278 cleanup_card(dev);
279 free_netdev(dev);
280 dev = ERR_PTR(err);
282 return dev;
287 free_netdev(dev);
288 return ERR_PTR(-ENODEV);
292 * mc32_probe1 - Check a given slot for a board and test the card
293 * @dev: Device structure to fill in
294 * @slot: The MCA bus slot being used by this card
296 * Decode the slot data and configure the card structures. Having done this we
297 * can reset the card and configure it. The card does a full self test cycle
298 * in firmware so we have to wait for it to return and post us either a
299 * failure case or some addresses we use to find the board internals.
302 static int __init mc32_probe1(struct net_device *dev, int slot)
304 static unsigned version_printed;
305 int i, err;
306 u8 POS;
307 u32 base;
308 struct mc32_local *lp = netdev_priv(dev);
309 static u16 mca_io_bases[]={
310 0x7280,0x7290,
311 0x7680,0x7690,
312 0x7A80,0x7A90,
313 0x7E80,0x7E90
315 static u32 mca_mem_bases[]={
316 0x00C0000,
317 0x00C4000,
318 0x00C8000,
319 0x00CC000,
320 0x00D0000,
321 0x00D4000,
322 0x00D8000,
323 0x00DC000
325 static char *failures[]={
326 "Processor instruction",
327 "Processor data bus",
328 "Processor data bus",
329 "Processor data bus",
330 "Adapter bus",
331 "ROM checksum",
332 "Base RAM",
333 "Extended RAM",
334 "82586 internal loopback",
335 "82586 initialisation failure",
336 "Adapter list configuration error"
338 DECLARE_MAC_BUF(mac);
340 /* Time to play MCA games */
342 if (mc32_debug && version_printed++ == 0)
343 printk(KERN_DEBUG "%s", version);
345 printk(KERN_INFO "%s: %s found in slot %d:", dev->name, cardname, slot);
347 POS = mca_read_stored_pos(slot, 2);
349 if(!(POS&1))
351 printk(" disabled.\n");
352 return -ENODEV;
355 /* Fill in the 'dev' fields. */
356 dev->base_addr = mca_io_bases[(POS>>1)&7];
357 dev->mem_start = mca_mem_bases[(POS>>4)&7];
359 POS = mca_read_stored_pos(slot, 4);
360 if(!(POS&1))
362 printk("memory window disabled.\n");
363 return -ENODEV;
366 POS = mca_read_stored_pos(slot, 5);
368 i=(POS>>4)&3;
369 if(i==3)
371 printk("invalid memory window.\n");
372 return -ENODEV;
375 i*=16384;
376 i+=16384;
378 dev->mem_end=dev->mem_start + i;
380 dev->irq = ((POS>>2)&3)+9;
382 if(!request_region(dev->base_addr, MC32_IO_EXTENT, cardname))
384 printk("io 0x%3lX, which is busy.\n", dev->base_addr);
385 return -EBUSY;
388 printk("io 0x%3lX irq %d mem 0x%lX (%dK)\n",
389 dev->base_addr, dev->irq, dev->mem_start, i/1024);
392 /* We ought to set the cache line size here.. */
396 * Go PROM browsing
399 /* Retrieve and print the ethernet address. */
400 for (i = 0; i < 6; i++)
402 mca_write_pos(slot, 6, i+12);
403 mca_write_pos(slot, 7, 0);
405 dev->dev_addr[i] = mca_read_pos(slot,3);
408 printk("%s: Address %s", dev->name, print_mac(mac, dev->dev_addr));
410 mca_write_pos(slot, 6, 0);
411 mca_write_pos(slot, 7, 0);
413 POS = mca_read_stored_pos(slot, 4);
415 if(POS&2)
416 printk(" : BNC port selected.\n");
417 else
418 printk(" : AUI port selected.\n");
420 POS=inb(dev->base_addr+HOST_CTRL);
421 POS|=HOST_CTRL_ATTN|HOST_CTRL_RESET;
422 POS&=~HOST_CTRL_INTE;
423 outb(POS, dev->base_addr+HOST_CTRL);
424 /* Reset adapter */
425 udelay(100);
426 /* Reset off */
427 POS&=~(HOST_CTRL_ATTN|HOST_CTRL_RESET);
428 outb(POS, dev->base_addr+HOST_CTRL);
430 udelay(300);
433 * Grab the IRQ
436 err = request_irq(dev->irq, &mc32_interrupt, IRQF_SHARED | IRQF_SAMPLE_RANDOM, DRV_NAME, dev);
437 if (err) {
438 release_region(dev->base_addr, MC32_IO_EXTENT);
439 printk(KERN_ERR "%s: unable to get IRQ %d.\n", DRV_NAME, dev->irq);
440 goto err_exit_ports;
443 memset(lp, 0, sizeof(struct mc32_local));
444 lp->slot = slot;
446 i=0;
448 base = inb(dev->base_addr);
450 while(base == 0xFF)
452 i++;
453 if(i == 1000)
455 printk(KERN_ERR "%s: failed to boot adapter.\n", dev->name);
456 err = -ENODEV;
457 goto err_exit_irq;
459 udelay(1000);
460 if(inb(dev->base_addr+2)&(1<<5))
461 base = inb(dev->base_addr);
464 if(base>0)
466 if(base < 0x0C)
467 printk(KERN_ERR "%s: %s%s.\n", dev->name, failures[base-1],
468 base<0x0A?" test failure":"");
469 else
470 printk(KERN_ERR "%s: unknown failure %d.\n", dev->name, base);
471 err = -ENODEV;
472 goto err_exit_irq;
475 base=0;
476 for(i=0;i<4;i++)
478 int n=0;
480 while(!(inb(dev->base_addr+2)&(1<<5)))
482 n++;
483 udelay(50);
484 if(n>100)
486 printk(KERN_ERR "%s: mailbox read fail (%d).\n", dev->name, i);
487 err = -ENODEV;
488 goto err_exit_irq;
492 base|=(inb(dev->base_addr)<<(8*i));
495 lp->exec_box=isa_bus_to_virt(dev->mem_start+base);
497 base=lp->exec_box->data[1]<<16|lp->exec_box->data[0];
499 lp->base = dev->mem_start+base;
501 lp->rx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[2]);
502 lp->tx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[3]);
504 lp->stats = isa_bus_to_virt(lp->base + lp->exec_box->data[5]);
507 * Descriptor chains (card relative)
510 lp->tx_chain = lp->exec_box->data[8]; /* Transmit list start offset */
511 lp->rx_chain = lp->exec_box->data[10]; /* Receive list start offset */
512 lp->tx_len = lp->exec_box->data[9]; /* Transmit list count */
513 lp->rx_len = lp->exec_box->data[11]; /* Receive list count */
515 init_MUTEX_LOCKED(&lp->cmd_mutex);
516 init_completion(&lp->execution_cmd);
517 init_completion(&lp->xceiver_cmd);
519 printk("%s: Firmware Rev %d. %d RX buffers, %d TX buffers. Base of 0x%08X.\n",
520 dev->name, lp->exec_box->data[12], lp->rx_len, lp->tx_len, lp->base);
522 dev->open = mc32_open;
523 dev->stop = mc32_close;
524 dev->hard_start_xmit = mc32_send_packet;
525 dev->get_stats = mc32_get_stats;
526 dev->set_multicast_list = mc32_set_multicast_list;
527 dev->tx_timeout = mc32_timeout;
528 dev->watchdog_timeo = HZ*5; /* Board does all the work */
529 dev->ethtool_ops = &netdev_ethtool_ops;
531 return 0;
533 err_exit_irq:
534 free_irq(dev->irq, dev);
535 err_exit_ports:
536 release_region(dev->base_addr, MC32_IO_EXTENT);
537 return err;
542 * mc32_ready_poll - wait until we can feed it a command
543 * @dev: The device to wait for
545 * Wait until the card becomes ready to accept a command via the
546 * command register. This tells us nothing about the completion
547 * status of any pending commands and takes very little time at all.
550 static inline void mc32_ready_poll(struct net_device *dev)
552 int ioaddr = dev->base_addr;
553 while(!(inb(ioaddr+HOST_STATUS)&HOST_STATUS_CRR));
558 * mc32_command_nowait - send a command non blocking
559 * @dev: The 3c527 to issue the command to
560 * @cmd: The command word to write to the mailbox
561 * @data: A data block if the command expects one
562 * @len: Length of the data block
564 * Send a command from interrupt state. If there is a command
565 * currently being executed then we return an error of -1. It
566 * simply isn't viable to wait around as commands may be
567 * slow. This can theoretically be starved on SMP, but it's hard
568 * to see a realistic situation. We do not wait for the command
569 * to complete --- we rely on the interrupt handler to tidy up
570 * after us.
573 static int mc32_command_nowait(struct net_device *dev, u16 cmd, void *data, int len)
575 struct mc32_local *lp = netdev_priv(dev);
576 int ioaddr = dev->base_addr;
577 int ret = -1;
579 if (down_trylock(&lp->cmd_mutex) == 0)
581 lp->cmd_nonblocking=1;
582 lp->exec_box->mbox=0;
583 lp->exec_box->mbox=cmd;
584 memcpy((void *)lp->exec_box->data, data, len);
585 barrier(); /* the memcpy forgot the volatile so be sure */
587 /* Send the command */
588 mc32_ready_poll(dev);
589 outb(1<<6, ioaddr+HOST_CMD);
591 ret = 0;
593 /* Interrupt handler will signal mutex on completion */
596 return ret;
601 * mc32_command - send a command and sleep until completion
602 * @dev: The 3c527 card to issue the command to
603 * @cmd: The command word to write to the mailbox
604 * @data: A data block if the command expects one
605 * @len: Length of the data block
607 * Sends exec commands in a user context. This permits us to wait around
608 * for the replies and also to wait for the command buffer to complete
609 * from a previous command before we execute our command. After our
610 * command completes we will attempt any pending multicast reload
611 * we blocked off by hogging the exec buffer.
613 * You feed the card a command, you wait, it interrupts you get a
614 * reply. All well and good. The complication arises because you use
615 * commands for filter list changes which come in at bh level from things
616 * like IPV6 group stuff.
619 static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len)
621 struct mc32_local *lp = netdev_priv(dev);
622 int ioaddr = dev->base_addr;
623 int ret = 0;
625 down(&lp->cmd_mutex);
628 * My Turn
631 lp->cmd_nonblocking=0;
632 lp->exec_box->mbox=0;
633 lp->exec_box->mbox=cmd;
634 memcpy((void *)lp->exec_box->data, data, len);
635 barrier(); /* the memcpy forgot the volatile so be sure */
637 mc32_ready_poll(dev);
638 outb(1<<6, ioaddr+HOST_CMD);
640 wait_for_completion(&lp->execution_cmd);
642 if(lp->exec_box->mbox&(1<<13))
643 ret = -1;
645 up(&lp->cmd_mutex);
648 * A multicast set got blocked - try it now
651 if(lp->mc_reload_wait)
653 mc32_reset_multicast_list(dev);
656 return ret;
661 * mc32_start_transceiver - tell board to restart tx/rx
662 * @dev: The 3c527 card to issue the command to
664 * This may be called from the interrupt state, where it is used
665 * to restart the rx ring if the card runs out of rx buffers.
667 * We must first check if it's ok to (re)start the transceiver. See
668 * mc32_close for details.
671 static void mc32_start_transceiver(struct net_device *dev) {
673 struct mc32_local *lp = netdev_priv(dev);
674 int ioaddr = dev->base_addr;
676 /* Ignore RX overflow on device closure */
677 if (lp->xceiver_desired_state==HALTED)
678 return;
680 /* Give the card the offset to the post-EOL-bit RX descriptor */
681 mc32_ready_poll(dev);
682 lp->rx_box->mbox=0;
683 lp->rx_box->data[0]=lp->rx_ring[prev_rx(lp->rx_ring_tail)].p->next;
684 outb(HOST_CMD_START_RX, ioaddr+HOST_CMD);
686 mc32_ready_poll(dev);
687 lp->tx_box->mbox=0;
688 outb(HOST_CMD_RESTRT_TX, ioaddr+HOST_CMD); /* card ignores this on RX restart */
690 /* We are not interrupted on start completion */
695 * mc32_halt_transceiver - tell board to stop tx/rx
696 * @dev: The 3c527 card to issue the command to
698 * We issue the commands to halt the card's transceiver. In fact,
699 * after some experimenting we now simply tell the card to
700 * suspend. When issuing aborts occasionally odd things happened.
702 * We then sleep until the card has notified us that both rx and
703 * tx have been suspended.
706 static void mc32_halt_transceiver(struct net_device *dev)
708 struct mc32_local *lp = netdev_priv(dev);
709 int ioaddr = dev->base_addr;
711 mc32_ready_poll(dev);
712 lp->rx_box->mbox=0;
713 outb(HOST_CMD_SUSPND_RX, ioaddr+HOST_CMD);
714 wait_for_completion(&lp->xceiver_cmd);
716 mc32_ready_poll(dev);
717 lp->tx_box->mbox=0;
718 outb(HOST_CMD_SUSPND_TX, ioaddr+HOST_CMD);
719 wait_for_completion(&lp->xceiver_cmd);
724 * mc32_load_rx_ring - load the ring of receive buffers
725 * @dev: 3c527 to build the ring for
727 * This initalises the on-card and driver datastructures to
728 * the point where mc32_start_transceiver() can be called.
730 * The card sets up the receive ring for us. We are required to use the
731 * ring it provides, although the size of the ring is configurable.
733 * We allocate an sk_buff for each ring entry in turn and
734 * initalise its house-keeping info. At the same time, we read
735 * each 'next' pointer in our rx_ring array. This reduces slow
736 * shared-memory reads and makes it easy to access predecessor
737 * descriptors.
739 * We then set the end-of-list bit for the last entry so that the
740 * card will know when it has run out of buffers.
743 static int mc32_load_rx_ring(struct net_device *dev)
745 struct mc32_local *lp = netdev_priv(dev);
746 int i;
747 u16 rx_base;
748 volatile struct skb_header *p;
750 rx_base=lp->rx_chain;
752 for(i=0; i<RX_RING_LEN; i++) {
753 lp->rx_ring[i].skb=alloc_skb(1532, GFP_KERNEL);
754 if (lp->rx_ring[i].skb==NULL) {
755 for (;i>=0;i--)
756 kfree_skb(lp->rx_ring[i].skb);
757 return -ENOBUFS;
759 skb_reserve(lp->rx_ring[i].skb, 18);
761 p=isa_bus_to_virt(lp->base+rx_base);
763 p->control=0;
764 p->data=isa_virt_to_bus(lp->rx_ring[i].skb->data);
765 p->status=0;
766 p->length=1532;
768 lp->rx_ring[i].p=p;
769 rx_base=p->next;
772 lp->rx_ring[i-1].p->control |= CONTROL_EOL;
774 lp->rx_ring_tail=0;
776 return 0;
781 * mc32_flush_rx_ring - free the ring of receive buffers
782 * @lp: Local data of 3c527 to flush the rx ring of
784 * Free the buffer for each ring slot. This may be called
785 * before mc32_load_rx_ring(), eg. on error in mc32_open().
786 * Requires rx skb pointers to point to a valid skb, or NULL.
789 static void mc32_flush_rx_ring(struct net_device *dev)
791 struct mc32_local *lp = netdev_priv(dev);
792 int i;
794 for(i=0; i < RX_RING_LEN; i++)
796 if (lp->rx_ring[i].skb) {
797 dev_kfree_skb(lp->rx_ring[i].skb);
798 lp->rx_ring[i].skb = NULL;
800 lp->rx_ring[i].p=NULL;
806 * mc32_load_tx_ring - load transmit ring
807 * @dev: The 3c527 card to issue the command to
809 * This sets up the host transmit data-structures.
811 * First, we obtain from the card it's current postion in the tx
812 * ring, so that we will know where to begin transmitting
813 * packets.
815 * Then, we read the 'next' pointers from the on-card tx ring into
816 * our tx_ring array to reduce slow shared-mem reads. Finally, we
817 * intitalise the tx house keeping variables.
821 static void mc32_load_tx_ring(struct net_device *dev)
823 struct mc32_local *lp = netdev_priv(dev);
824 volatile struct skb_header *p;
825 int i;
826 u16 tx_base;
828 tx_base=lp->tx_box->data[0];
830 for(i=0 ; i<TX_RING_LEN ; i++)
832 p=isa_bus_to_virt(lp->base+tx_base);
833 lp->tx_ring[i].p=p;
834 lp->tx_ring[i].skb=NULL;
836 tx_base=p->next;
839 /* -1 so that tx_ring_head cannot "lap" tx_ring_tail */
840 /* see mc32_tx_ring */
842 atomic_set(&lp->tx_count, TX_RING_LEN-1);
843 atomic_set(&lp->tx_ring_head, 0);
844 lp->tx_ring_tail=0;
849 * mc32_flush_tx_ring - free transmit ring
850 * @lp: Local data of 3c527 to flush the tx ring of
852 * If the ring is non-empty, zip over the it, freeing any
853 * allocated skb_buffs. The tx ring house-keeping variables are
854 * then reset. Requires rx skb pointers to point to a valid skb,
855 * or NULL.
858 static void mc32_flush_tx_ring(struct net_device *dev)
860 struct mc32_local *lp = netdev_priv(dev);
861 int i;
863 for (i=0; i < TX_RING_LEN; i++)
865 if (lp->tx_ring[i].skb)
867 dev_kfree_skb(lp->tx_ring[i].skb);
868 lp->tx_ring[i].skb = NULL;
872 atomic_set(&lp->tx_count, 0);
873 atomic_set(&lp->tx_ring_head, 0);
874 lp->tx_ring_tail=0;
879 * mc32_open - handle 'up' of card
880 * @dev: device to open
882 * The user is trying to bring the card into ready state. This requires
883 * a brief dialogue with the card. Firstly we enable interrupts and then
884 * 'indications'. Without these enabled the card doesn't bother telling
885 * us what it has done. This had me puzzled for a week.
887 * We configure the number of card descriptors, then load the network
888 * address and multicast filters. Turn on the workaround mode. This
889 * works around a bug in the 82586 - it asks the firmware to do
890 * so. It has a performance (latency) hit but is needed on busy
891 * [read most] lans. We load the ring with buffers then we kick it
892 * all off.
895 static int mc32_open(struct net_device *dev)
897 int ioaddr = dev->base_addr;
898 struct mc32_local *lp = netdev_priv(dev);
899 u8 one=1;
900 u8 regs;
901 u16 descnumbuffs[2] = {TX_RING_LEN, RX_RING_LEN};
904 * Interrupts enabled
907 regs=inb(ioaddr+HOST_CTRL);
908 regs|=HOST_CTRL_INTE;
909 outb(regs, ioaddr+HOST_CTRL);
912 * Allow ourselves to issue commands
915 up(&lp->cmd_mutex);
919 * Send the indications on command
922 mc32_command(dev, 4, &one, 2);
925 * Poke it to make sure it's really dead.
928 mc32_halt_transceiver(dev);
929 mc32_flush_tx_ring(dev);
932 * Ask card to set up on-card descriptors to our spec
935 if(mc32_command(dev, 8, descnumbuffs, 4)) {
936 printk("%s: %s rejected our buffer configuration!\n",
937 dev->name, cardname);
938 mc32_close(dev);
939 return -ENOBUFS;
942 /* Report new configuration */
943 mc32_command(dev, 6, NULL, 0);
945 lp->tx_chain = lp->exec_box->data[8]; /* Transmit list start offset */
946 lp->rx_chain = lp->exec_box->data[10]; /* Receive list start offset */
947 lp->tx_len = lp->exec_box->data[9]; /* Transmit list count */
948 lp->rx_len = lp->exec_box->data[11]; /* Receive list count */
950 /* Set Network Address */
951 mc32_command(dev, 1, dev->dev_addr, 6);
953 /* Set the filters */
954 mc32_set_multicast_list(dev);
956 if (WORKAROUND_82586) {
957 u16 zero_word=0;
958 mc32_command(dev, 0x0D, &zero_word, 2); /* 82586 bug workaround on */
961 mc32_load_tx_ring(dev);
963 if(mc32_load_rx_ring(dev))
965 mc32_close(dev);
966 return -ENOBUFS;
969 lp->xceiver_desired_state = RUNNING;
971 /* And finally, set the ball rolling... */
972 mc32_start_transceiver(dev);
974 netif_start_queue(dev);
976 return 0;
981 * mc32_timeout - handle a timeout from the network layer
982 * @dev: 3c527 that timed out
984 * Handle a timeout on transmit from the 3c527. This normally means
985 * bad things as the hardware handles cable timeouts and mess for
986 * us.
990 static void mc32_timeout(struct net_device *dev)
992 printk(KERN_WARNING "%s: transmit timed out?\n", dev->name);
993 /* Try to restart the adaptor. */
994 netif_wake_queue(dev);
999 * mc32_send_packet - queue a frame for transmit
1000 * @skb: buffer to transmit
1001 * @dev: 3c527 to send it out of
1003 * Transmit a buffer. This normally means throwing the buffer onto
1004 * the transmit queue as the queue is quite large. If the queue is
1005 * full then we set tx_busy and return. Once the interrupt handler
1006 * gets messages telling it to reclaim transmit queue entries, we will
1007 * clear tx_busy and the kernel will start calling this again.
1009 * We do not disable interrupts or acquire any locks; this can
1010 * run concurrently with mc32_tx_ring(), and the function itself
1011 * is serialised at a higher layer. However, similarly for the
1012 * card itself, we must ensure that we update tx_ring_head only
1013 * after we've established a valid packet on the tx ring (and
1014 * before we let the card "see" it, to prevent it racing with the
1015 * irq handler).
1019 static int mc32_send_packet(struct sk_buff *skb, struct net_device *dev)
1021 struct mc32_local *lp = netdev_priv(dev);
1022 u32 head = atomic_read(&lp->tx_ring_head);
1024 volatile struct skb_header *p, *np;
1026 netif_stop_queue(dev);
1028 if(atomic_read(&lp->tx_count)==0) {
1029 return 1;
1032 if (skb_padto(skb, ETH_ZLEN)) {
1033 netif_wake_queue(dev);
1034 return 0;
1037 atomic_dec(&lp->tx_count);
1039 /* P is the last sending/sent buffer as a pointer */
1040 p=lp->tx_ring[head].p;
1042 head = next_tx(head);
1044 /* NP is the buffer we will be loading */
1045 np=lp->tx_ring[head].p;
1047 /* We will need this to flush the buffer out */
1048 lp->tx_ring[head].skb=skb;
1050 np->length = unlikely(skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
1051 np->data = isa_virt_to_bus(skb->data);
1052 np->status = 0;
1053 np->control = CONTROL_EOP | CONTROL_EOL;
1054 wmb();
1057 * The new frame has been setup; we can now
1058 * let the interrupt handler and card "see" it
1061 atomic_set(&lp->tx_ring_head, head);
1062 p->control &= ~CONTROL_EOL;
1064 netif_wake_queue(dev);
1065 return 0;
1070 * mc32_update_stats - pull off the on board statistics
1071 * @dev: 3c527 to service
1074 * Query and reset the on-card stats. There's the small possibility
1075 * of a race here, which would result in an underestimation of
1076 * actual errors. As such, we'd prefer to keep all our stats
1077 * collection in software. As a rule, we do. However it can't be
1078 * used for rx errors and collisions as, by default, the card discards
1079 * bad rx packets.
1081 * Setting the SAV BP in the rx filter command supposedly
1082 * stops this behaviour. However, testing shows that it only seems to
1083 * enable the collation of on-card rx statistics --- the driver
1084 * never sees an RX descriptor with an error status set.
1088 static void mc32_update_stats(struct net_device *dev)
1090 struct mc32_local *lp = netdev_priv(dev);
1091 volatile struct mc32_stats *st = lp->stats;
1093 u32 rx_errors=0;
1095 rx_errors+=dev->stats.rx_crc_errors +=st->rx_crc_errors;
1096 st->rx_crc_errors=0;
1097 rx_errors+=dev->stats.rx_fifo_errors +=st->rx_overrun_errors;
1098 st->rx_overrun_errors=0;
1099 rx_errors+=dev->stats.rx_frame_errors +=st->rx_alignment_errors;
1100 st->rx_alignment_errors=0;
1101 rx_errors+=dev->stats.rx_length_errors+=st->rx_tooshort_errors;
1102 st->rx_tooshort_errors=0;
1103 rx_errors+=dev->stats.rx_missed_errors+=st->rx_outofresource_errors;
1104 st->rx_outofresource_errors=0;
1105 dev->stats.rx_errors=rx_errors;
1107 /* Number of packets which saw one collision */
1108 dev->stats.collisions+=st->dataC[10];
1109 st->dataC[10]=0;
1111 /* Number of packets which saw 2--15 collisions */
1112 dev->stats.collisions+=st->dataC[11];
1113 st->dataC[11]=0;
1118 * mc32_rx_ring - process the receive ring
1119 * @dev: 3c527 that needs its receive ring processing
1122 * We have received one or more indications from the card that a
1123 * receive has completed. The buffer ring thus contains dirty
1124 * entries. We walk the ring by iterating over the circular rx_ring
1125 * array, starting at the next dirty buffer (which happens to be the
1126 * one we finished up at last time around).
1128 * For each completed packet, we will either copy it and pass it up
1129 * the stack or, if the packet is near MTU sized, we allocate
1130 * another buffer and flip the old one up the stack.
1132 * We must succeed in keeping a buffer on the ring. If necessary we
1133 * will toss a received packet rather than lose a ring entry. Once
1134 * the first uncompleted descriptor is found, we move the
1135 * End-Of-List bit to include the buffers just processed.
1139 static void mc32_rx_ring(struct net_device *dev)
1141 struct mc32_local *lp = netdev_priv(dev);
1142 volatile struct skb_header *p;
1143 u16 rx_ring_tail;
1144 u16 rx_old_tail;
1145 int x=0;
1147 rx_old_tail = rx_ring_tail = lp->rx_ring_tail;
1151 p=lp->rx_ring[rx_ring_tail].p;
1153 if(!(p->status & (1<<7))) { /* Not COMPLETED */
1154 break;
1156 if(p->status & (1<<6)) /* COMPLETED_OK */
1159 u16 length=p->length;
1160 struct sk_buff *skb;
1161 struct sk_buff *newskb;
1163 /* Try to save time by avoiding a copy on big frames */
1165 if ((length > RX_COPYBREAK)
1166 && ((newskb=dev_alloc_skb(1532)) != NULL))
1168 skb=lp->rx_ring[rx_ring_tail].skb;
1169 skb_put(skb, length);
1171 skb_reserve(newskb,18);
1172 lp->rx_ring[rx_ring_tail].skb=newskb;
1173 p->data=isa_virt_to_bus(newskb->data);
1175 else
1177 skb=dev_alloc_skb(length+2);
1179 if(skb==NULL) {
1180 dev->stats.rx_dropped++;
1181 goto dropped;
1184 skb_reserve(skb,2);
1185 memcpy(skb_put(skb, length),
1186 lp->rx_ring[rx_ring_tail].skb->data, length);
1189 skb->protocol=eth_type_trans(skb,dev);
1190 dev->last_rx = jiffies;
1191 dev->stats.rx_packets++;
1192 dev->stats.rx_bytes += length;
1193 netif_rx(skb);
1196 dropped:
1197 p->length = 1532;
1198 p->status = 0;
1200 rx_ring_tail=next_rx(rx_ring_tail);
1202 while(x++<48);
1204 /* If there was actually a frame to be processed, place the EOL bit */
1205 /* at the descriptor prior to the one to be filled next */
1207 if (rx_ring_tail != rx_old_tail)
1209 lp->rx_ring[prev_rx(rx_ring_tail)].p->control |= CONTROL_EOL;
1210 lp->rx_ring[prev_rx(rx_old_tail)].p->control &= ~CONTROL_EOL;
1212 lp->rx_ring_tail=rx_ring_tail;
1218 * mc32_tx_ring - process completed transmits
1219 * @dev: 3c527 that needs its transmit ring processing
1222 * This operates in a similar fashion to mc32_rx_ring. We iterate
1223 * over the transmit ring. For each descriptor which has been
1224 * processed by the card, we free its associated buffer and note
1225 * any errors. This continues until the transmit ring is emptied
1226 * or we reach a descriptor that hasn't yet been processed by the
1227 * card.
1231 static void mc32_tx_ring(struct net_device *dev)
1233 struct mc32_local *lp = netdev_priv(dev);
1234 volatile struct skb_header *np;
1237 * We rely on head==tail to mean 'queue empty'.
1238 * This is why lp->tx_count=TX_RING_LEN-1: in order to prevent
1239 * tx_ring_head wrapping to tail and confusing a 'queue empty'
1240 * condition with 'queue full'
1243 while (lp->tx_ring_tail != atomic_read(&lp->tx_ring_head))
1245 u16 t;
1247 t=next_tx(lp->tx_ring_tail);
1248 np=lp->tx_ring[t].p;
1250 if(!(np->status & (1<<7)))
1252 /* Not COMPLETED */
1253 break;
1255 dev->stats.tx_packets++;
1256 if(!(np->status & (1<<6))) /* Not COMPLETED_OK */
1258 dev->stats.tx_errors++;
1260 switch(np->status&0x0F)
1262 case 1:
1263 dev->stats.tx_aborted_errors++;
1264 break; /* Max collisions */
1265 case 2:
1266 dev->stats.tx_fifo_errors++;
1267 break;
1268 case 3:
1269 dev->stats.tx_carrier_errors++;
1270 break;
1271 case 4:
1272 dev->stats.tx_window_errors++;
1273 break; /* CTS Lost */
1274 case 5:
1275 dev->stats.tx_aborted_errors++;
1276 break; /* Transmit timeout */
1279 /* Packets are sent in order - this is
1280 basically a FIFO queue of buffers matching
1281 the card ring */
1282 dev->stats.tx_bytes+=lp->tx_ring[t].skb->len;
1283 dev_kfree_skb_irq(lp->tx_ring[t].skb);
1284 lp->tx_ring[t].skb=NULL;
1285 atomic_inc(&lp->tx_count);
1286 netif_wake_queue(dev);
1288 lp->tx_ring_tail=t;
1295 * mc32_interrupt - handle an interrupt from a 3c527
1296 * @irq: Interrupt number
1297 * @dev_id: 3c527 that requires servicing
1298 * @regs: Registers (unused)
1301 * An interrupt is raised whenever the 3c527 writes to the command
1302 * register. This register contains the message it wishes to send us
1303 * packed into a single byte field. We keep reading status entries
1304 * until we have processed all the control items, but simply count
1305 * transmit and receive reports. When all reports are in we empty the
1306 * transceiver rings as appropriate. This saves the overhead of
1307 * multiple command requests.
1309 * Because MCA is level-triggered, we shouldn't miss indications.
1310 * Therefore, we needn't ask the card to suspend interrupts within
1311 * this handler. The card receives an implicit acknowledgment of the
1312 * current interrupt when we read the command register.
1316 static irqreturn_t mc32_interrupt(int irq, void *dev_id)
1318 struct net_device *dev = dev_id;
1319 struct mc32_local *lp;
1320 int ioaddr, status, boguscount = 0;
1321 int rx_event = 0;
1322 int tx_event = 0;
1324 ioaddr = dev->base_addr;
1325 lp = netdev_priv(dev);
1327 /* See whats cooking */
1329 while((inb(ioaddr+HOST_STATUS)&HOST_STATUS_CWR) && boguscount++<2000)
1331 status=inb(ioaddr+HOST_CMD);
1333 #ifdef DEBUG_IRQ
1334 printk("Status TX%d RX%d EX%d OV%d BC%d\n",
1335 (status&7), (status>>3)&7, (status>>6)&1,
1336 (status>>7)&1, boguscount);
1337 #endif
1339 switch(status&7)
1341 case 0:
1342 break;
1343 case 6: /* TX fail */
1344 case 2: /* TX ok */
1345 tx_event = 1;
1346 break;
1347 case 3: /* Halt */
1348 case 4: /* Abort */
1349 complete(&lp->xceiver_cmd);
1350 break;
1351 default:
1352 printk("%s: strange tx ack %d\n", dev->name, status&7);
1354 status>>=3;
1355 switch(status&7)
1357 case 0:
1358 break;
1359 case 2: /* RX */
1360 rx_event=1;
1361 break;
1362 case 3: /* Halt */
1363 case 4: /* Abort */
1364 complete(&lp->xceiver_cmd);
1365 break;
1366 case 6:
1367 /* Out of RX buffers stat */
1368 /* Must restart rx */
1369 dev->stats.rx_dropped++;
1370 mc32_rx_ring(dev);
1371 mc32_start_transceiver(dev);
1372 break;
1373 default:
1374 printk("%s: strange rx ack %d\n",
1375 dev->name, status&7);
1377 status>>=3;
1378 if(status&1)
1381 * No thread is waiting: we need to tidy
1382 * up ourself.
1385 if (lp->cmd_nonblocking) {
1386 up(&lp->cmd_mutex);
1387 if (lp->mc_reload_wait)
1388 mc32_reset_multicast_list(dev);
1390 else complete(&lp->execution_cmd);
1392 if(status&2)
1395 * We get interrupted once per
1396 * counter that is about to overflow.
1399 mc32_update_stats(dev);
1405 * Process the transmit and receive rings
1408 if(tx_event)
1409 mc32_tx_ring(dev);
1411 if(rx_event)
1412 mc32_rx_ring(dev);
1414 return IRQ_HANDLED;
1419 * mc32_close - user configuring the 3c527 down
1420 * @dev: 3c527 card to shut down
1422 * The 3c527 is a bus mastering device. We must be careful how we
1423 * shut it down. It may also be running shared interrupt so we have
1424 * to be sure to silence it properly
1426 * We indicate that the card is closing to the rest of the
1427 * driver. Otherwise, it is possible that the card may run out
1428 * of receive buffers and restart the transceiver while we're
1429 * trying to close it.
1431 * We abort any receive and transmits going on and then wait until
1432 * any pending exec commands have completed in other code threads.
1433 * In theory we can't get here while that is true, in practice I am
1434 * paranoid
1436 * We turn off the interrupt enable for the board to be sure it can't
1437 * intefere with other devices.
1440 static int mc32_close(struct net_device *dev)
1442 struct mc32_local *lp = netdev_priv(dev);
1443 int ioaddr = dev->base_addr;
1445 u8 regs;
1446 u16 one=1;
1448 lp->xceiver_desired_state = HALTED;
1449 netif_stop_queue(dev);
1452 * Send the indications on command (handy debug check)
1455 mc32_command(dev, 4, &one, 2);
1457 /* Shut down the transceiver */
1459 mc32_halt_transceiver(dev);
1461 /* Ensure we issue no more commands beyond this point */
1463 down(&lp->cmd_mutex);
1465 /* Ok the card is now stopping */
1467 regs=inb(ioaddr+HOST_CTRL);
1468 regs&=~HOST_CTRL_INTE;
1469 outb(regs, ioaddr+HOST_CTRL);
1471 mc32_flush_rx_ring(dev);
1472 mc32_flush_tx_ring(dev);
1474 mc32_update_stats(dev);
1476 return 0;
1481 * mc32_get_stats - hand back stats to network layer
1482 * @dev: The 3c527 card to handle
1484 * We've collected all the stats we can in software already. Now
1485 * it's time to update those kept on-card and return the lot.
1489 static struct net_device_stats *mc32_get_stats(struct net_device *dev)
1491 mc32_update_stats(dev);
1492 return &dev->stats;
1497 * do_mc32_set_multicast_list - attempt to update multicasts
1498 * @dev: 3c527 device to load the list on
1499 * @retry: indicates this is not the first call.
1502 * Actually set or clear the multicast filter for this adaptor. The
1503 * locking issues are handled by this routine. We have to track
1504 * state as it may take multiple calls to get the command sequence
1505 * completed. We just keep trying to schedule the loads until we
1506 * manage to process them all.
1508 * num_addrs == -1 Promiscuous mode, receive all packets
1510 * num_addrs == 0 Normal mode, clear multicast list
1512 * num_addrs > 0 Multicast mode, receive normal and MC packets,
1513 * and do best-effort filtering.
1515 * See mc32_update_stats() regards setting the SAV BP bit.
1519 static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
1521 struct mc32_local *lp = netdev_priv(dev);
1522 u16 filt = (1<<2); /* Save Bad Packets, for stats purposes */
1524 if ((dev->flags&IFF_PROMISC) ||
1525 (dev->flags&IFF_ALLMULTI) ||
1526 dev->mc_count > 10)
1527 /* Enable promiscuous mode */
1528 filt |= 1;
1529 else if(dev->mc_count)
1531 unsigned char block[62];
1532 unsigned char *bp;
1533 struct dev_mc_list *dmc=dev->mc_list;
1535 int i;
1537 if(retry==0)
1538 lp->mc_list_valid = 0;
1539 if(!lp->mc_list_valid)
1541 block[1]=0;
1542 block[0]=dev->mc_count;
1543 bp=block+2;
1545 for(i=0;i<dev->mc_count;i++)
1547 memcpy(bp, dmc->dmi_addr, 6);
1548 bp+=6;
1549 dmc=dmc->next;
1551 if(mc32_command_nowait(dev, 2, block, 2+6*dev->mc_count)==-1)
1553 lp->mc_reload_wait = 1;
1554 return;
1556 lp->mc_list_valid=1;
1560 if(mc32_command_nowait(dev, 0, &filt, 2)==-1)
1562 lp->mc_reload_wait = 1;
1564 else {
1565 lp->mc_reload_wait = 0;
/**
 *	mc32_set_multicast_list	-	queue multicast list update
 *	@dev: The 3c527 to use
 *
 *	Commence loading the multicast list. This is called when the kernel
 *	changes the lists. It will override any pending list we are trying to
 *	load.
 */
static void mc32_set_multicast_list(struct net_device *dev)
{
	do_mc32_set_multicast_list(dev, 0);	/* retry == 0: first attempt */
}
/**
 *	mc32_reset_multicast_list	-	reset multicast list
 *	@dev: The 3c527 to use
 *
 *	Attempt the next step in loading the multicast lists. If this attempt
 *	fails to complete then it will be scheduled and this function called
 *	again later from elsewhere.
 */
static void mc32_reset_multicast_list(struct net_device *dev)
{
	do_mc32_set_multicast_list(dev, 1);	/* retry == 1: continuation */
}
1599 static void netdev_get_drvinfo(struct net_device *dev,
1600 struct ethtool_drvinfo *info)
1602 strcpy(info->driver, DRV_NAME);
1603 strcpy(info->version, DRV_VERSION);
1604 sprintf(info->bus_info, "MCA 0x%lx", dev->base_addr);
1607 static u32 netdev_get_msglevel(struct net_device *dev)
1609 return mc32_debug;
1612 static void netdev_set_msglevel(struct net_device *dev, u32 level)
1614 mc32_debug = level;
1617 static const struct ethtool_ops netdev_ethtool_ops = {
1618 .get_drvinfo = netdev_get_drvinfo,
1619 .get_msglevel = netdev_get_msglevel,
1620 .set_msglevel = netdev_set_msglevel,
1623 #ifdef MODULE
1625 static struct net_device *this_device;
1628 * init_module - entry point
1630 * Probe and locate a 3c527 card. This really should probe and locate
1631 * all the 3c527 cards in the machine not just one of them. Yes you can
1632 * insmod multiple modules for now but it's a hack.
1635 int __init init_module(void)
1637 this_device = mc32_probe(-1);
1638 if (IS_ERR(this_device))
1639 return PTR_ERR(this_device);
1640 return 0;
1644 * cleanup_module - free resources for an unload
1646 * Unloading time. We release the MCA bus resources and the interrupt
1647 * at which point everything is ready to unload. The card must be stopped
1648 * at this point or we would not have been called. When we unload we
1649 * leave the card stopped but not totally shut down. When the card is
1650 * initialized it must be rebooted or the rings reloaded before any
1651 * transmit operations are allowed to start scribbling into memory.
1654 void __exit cleanup_module(void)
1656 unregister_netdev(this_device);
1657 cleanup_card(this_device);
1658 free_netdev(this_device);
1661 #endif /* MODULE */