1 /* 82596.c: A generic 82596 ethernet driver for linux. */
2 /*
3 Based on Apricot.c
4 Written 1994 by Mark Evans.
5 This driver is for the Apricot 82596 bus-master interface
7 Modularised 12/94 Mark Evans
10 Modified to support the 82596 ethernet chips on 680x0 VME boards.
11 by Richard Hirst <richard@sleepie.demon.co.uk>
12 Renamed to be 82596.c
14    980825:  Changed to receive directly into sk_buffs which are
15 allocated at open() time. Eliminates copy on incoming frames
16 (small ones are still copied). Shared data now held in a
17 non-cached page, so we can run on 68060 in copyback mode.
19 TBD:
20 * look at deferring rx frames rather than discarding (as per tulip)
21 * handle tx ring full as per tulip
22    * performance test to tune rx_copybreak
24 Most of my modifications relate to the braindead big-endian
25 implementation by Intel. When the i596 is operating in
26 'big-endian' mode, it thinks a 32 bit value of 0x12345678
27 should be stored as 0x56781234. This is a real pain, when
28 you have linked lists which are shared by the 680x0 and the
29 i596.
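     (The WSWAP* macros below compensate by swapping the 16-bit halves of
     each 32-bit pointer: ((u32)(x) << 16) | ((u32)(x) >> 16).)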
31 Driver skeleton
32 Written 1993 by Donald Becker.
33 Copyright 1993 United States Government as represented by the Director,
34 National Security Agency. This software may only be used and distributed
35 according to the terms of the GNU Public License as modified by SRC,
36 incorporated herein by reference.
38 The author may be reached as becker@super.org or
39    C/O Supercomputing Research Ctr., 17100 Science Dr., Bowie MD 20715 */
43 static const char *version = "82596.c $Revision: 1.4 $\n";
45 #include <linux/config.h>
46 #include <linux/module.h>
48 #include <linux/kernel.h>
49 #include <linux/sched.h>
50 #include <linux/string.h>
51 #include <linux/ptrace.h>
52 #include <linux/errno.h>
53 #include <linux/ioport.h>
54 #include <linux/malloc.h>
55 #include <linux/interrupt.h>
56 #include <linux/delay.h>
57 #include <linux/netdevice.h>
58 #include <linux/etherdevice.h>
59 #include <linux/skbuff.h>
60 #include <linux/init.h>
62 #include <asm/bitops.h>
63 #include <asm/io.h>
64 #include <asm/dma.h>
65 #include <asm/pgtable.h>
66 #include <asm/pgalloc.h>
68 /* DEBUG flags */
71 #define DEB_INIT 0x0001
72 #define DEB_PROBE 0x0002
73 #define DEB_SERIOUS 0x0004
74 #define DEB_ERRORS 0x0008
75 #define DEB_MULTI 0x0010
76 #define DEB_TDR 0x0020
77 #define DEB_OPEN 0x0040
78 #define DEB_RESET 0x0080
79 #define DEB_ADDCMD 0x0100
80 #define DEB_STATUS 0x0200
81 #define DEB_STARTTX 0x0400
82 #define DEB_RXADDR 0x0800
83 #define DEB_TXADDR 0x1000
84 #define DEB_RXFRAME 0x2000
85 #define DEB_INTS 0x4000
86 #define DEB_STRUCT 0x8000
87 #define DEB_ANY 0xffff
90 #define DEB(x,y) if (i596_debug & (x)) y
93 #if defined(CONFIG_MVME16x_NET) || defined(CONFIG_MVME16x_NET_MODULE)
94 #define ENABLE_MVME16x_NET
95 #endif
96 #if defined(CONFIG_BVME6000_NET) || defined(CONFIG_BVME6000_NET_MODULE)
97 #define ENABLE_BVME6000_NET
98 #endif
99 #if defined(CONFIG_APRICOT) || defined(CONFIG_APRICOT_MODULE)
100 #define ENABLE_APRICOT
101 #endif
103 #ifdef ENABLE_MVME16x_NET
104 #include <asm/mvme16xhw.h>
105 #endif
106 #ifdef ENABLE_BVME6000_NET
107 #include <asm/bvme6000hw.h>
108 #endif
111 /* Define various macros for Channel Attention, word swapping etc., dependent
112  * on architecture.  MVME and BVME are 680x0 based, otherwise it is Intel. */
115 #ifdef __mc68000__
116 #define WSWAPrfd(x) ((struct i596_rfd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
117 #define WSWAPrbd(x) ((struct i596_rbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
118 #define WSWAPiscp(x) ((struct i596_iscp *)(((u32)(x)<<16) | ((((u32)(x)))>>16)))
119 #define WSWAPscb(x) ((struct i596_scb *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
120 #define WSWAPcmd(x) ((struct i596_cmd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
121 #define WSWAPtbd(x) ((struct i596_tbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
122 #define WSWAPchar(x) ((char *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
123 #define ISCP_BUSY 0x00010000
124 #define MACH_IS_APRICOT 0
125 #else
126 #define WSWAPrfd(x) ((struct i596_rfd *)(x))
127 #define WSWAPrbd(x) ((struct i596_rbd *)(x))
128 #define WSWAPiscp(x) ((struct i596_iscp *)(x))
129 #define WSWAPscb(x) ((struct i596_scb *)(x))
130 #define WSWAPcmd(x) ((struct i596_cmd *)(x))
131 #define WSWAPtbd(x) ((struct i596_tbd *)(x))
132 #define WSWAPchar(x) ((char *)(x))
133 #define ISCP_BUSY 0x0001
134 #define MACH_IS_APRICOT 1
135 #endif
138 /* The MPU_PORT command allows direct access to the 82596. With PORT access
139  * the following commands are available (p5-18). The 32-bit port command
140  * must be word-swapped with the most significant word written first.
141  * This only applies to VME boards. */
143 #define PORT_RESET 0x00 /* reset 82596 */
144 #define PORT_SELFTEST 0x01 /* selftest */
145 #define PORT_ALTSCP 0x02 /* alternate SCB address */
146 #define PORT_ALTDUMP 0x03 /* Alternate DUMP address */
148 static int i596_debug = (DEB_SERIOUS|DEB_PROBE);
150 MODULE_AUTHOR("Richard Hirst");
151 MODULE_DESCRIPTION("i82596 driver");
152 MODULE_PARM(i596_debug, "i");
155 /* Copy frames shorter than rx_copybreak, otherwise pass on up in
156  * a full sized sk_buff.  Value of 100 stolen from tulip.c (!alpha). */
158 static int rx_copybreak = 100;
160 #define PKT_BUF_SZ 1536
161 #define MAX_MC_CNT 64
163 #define I596_TOTAL_SIZE 17
165 #define I596_NULL ((void *)0xffffffff)
167 #define CMD_EOL 0x8000 /* The last command of the list, stop. */
168 #define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
169 #define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
171 #define CMD_FLEX 0x0008 /* Enable flexible memory model */
173 enum commands {
174 CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
175 CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
178 #define STAT_C 0x8000 /* Set to 0 after execution */
179 #define STAT_B 0x4000 /* Command being executed */
180 #define STAT_OK 0x2000 /* Command executed ok */
181 #define STAT_A 0x1000 /* Command aborted */
183 #define CUC_START 0x0100
184 #define CUC_RESUME 0x0200
185 #define CUC_SUSPEND 0x0300
186 #define CUC_ABORT 0x0400
187 #define RX_START 0x0010
188 #define RX_RESUME 0x0020
189 #define RX_SUSPEND 0x0030
190 #define RX_ABORT 0x0040
192 #define TX_TIMEOUT 5
195 struct i596_reg {
196 unsigned short porthi;
197 unsigned short portlo;
198 unsigned long ca;
201 #define EOF 0x8000
202 #define SIZE_MASK 0x3fff
204 struct i596_tbd {
205 unsigned short size;
206 unsigned short pad;
207 struct i596_tbd *next;
208 char *data;
211 /* The command structure has two 'next' pointers; v_next is the address of
212 * the next command as seen by the CPU, b_next is the address of the next
213 * command as seen by the 82596. The b_next pointer, as used by the 82596
214 * always references the status field of the next command, rather than the
215 * v_next field, because the 82596 is unaware of v_next. It may seem more
216 * logical to put v_next at the end of the structure, but we cannot do that
217 * because the 82596 expects other fields to be there, depending on command
218  * type. */
221 struct i596_cmd {
222 struct i596_cmd *v_next; /* Address from CPUs viewpoint */
223 unsigned short status;
224 unsigned short command;
225 struct i596_cmd *b_next; /* Address from i596 viewpoint */
228 struct tx_cmd {
229 struct i596_cmd cmd;
230 struct i596_tbd *tbd;
231 unsigned short size;
232 unsigned short pad;
233 struct sk_buff *skb; /* So we can free it after tx */
236 struct tdr_cmd {
237 struct i596_cmd cmd;
238 unsigned short status;
239 unsigned short pad;
242 struct mc_cmd {
243 struct i596_cmd cmd;
244 short mc_cnt;
245 char mc_addrs[MAX_MC_CNT*6];
248 struct sa_cmd {
249 struct i596_cmd cmd;
250 char eth_addr[8];
253 struct cf_cmd {
254 struct i596_cmd cmd;
255 char i596_config[16];
258 struct i596_rfd {
259 unsigned short stat;
260 unsigned short cmd;
261 struct i596_rfd *b_next; /* Address from i596 viewpoint */
262 struct i596_rbd *rbd;
263 unsigned short count;
264 unsigned short size;
265 struct i596_rfd *v_next; /* Address from CPUs viewpoint */
266 struct i596_rfd *v_prev;
269 struct i596_rbd {
270 unsigned short count;
271 unsigned short zero1;
272 struct i596_rbd *b_next;
273 unsigned char *b_data; /* Address from i596 viewpoint */
274 unsigned short size;
275 unsigned short zero2;
276 struct sk_buff *skb;
277 struct i596_rbd *v_next;
278 struct i596_rbd *b_addr; /* This rbd addr from i596 view */
279 unsigned char *v_data; /* Address from CPUs viewpoint */
282 #define TX_RING_SIZE 64
283 #define RX_RING_SIZE 16
285 struct i596_scb {
286 unsigned short status;
287 unsigned short command;
288 struct i596_cmd *cmd;
289 struct i596_rfd *rfd;
290 unsigned long crc_err;
291 unsigned long align_err;
292 unsigned long resource_err;
293 unsigned long over_err;
294 unsigned long rcvdt_err;
295 unsigned long short_err;
296 unsigned short t_on;
297 unsigned short t_off;
300 struct i596_iscp {
301 unsigned long stat;
302 struct i596_scb *scb;
305 struct i596_scp {
306 unsigned long sysbus;
307 unsigned long pad;
308 struct i596_iscp *iscp;
311 struct i596_private {
312 volatile struct i596_scp scp;
313 volatile struct i596_iscp iscp;
314 volatile struct i596_scb scb;
315 struct sa_cmd sa_cmd;
316 struct cf_cmd cf_cmd;
317 struct tdr_cmd tdr_cmd;
318 struct mc_cmd mc_cmd;
319 unsigned long stat;
320 int last_restart __attribute__((aligned(4)));
321 struct i596_rfd *rfd_head;
322 struct i596_rbd *rbd_head;
323 struct i596_cmd *cmd_tail;
324 struct i596_cmd *cmd_head;
325 int cmd_backlog;
326 unsigned long last_cmd;
327 struct net_device_stats stats;
328 struct i596_rfd rfds[RX_RING_SIZE];
329 struct i596_rbd rbds[RX_RING_SIZE];
330 struct tx_cmd tx_cmds[TX_RING_SIZE];
331 struct i596_tbd tbds[TX_RING_SIZE];
332 int next_tx_cmd;
333 spinlock_t lock;
336 char init_setup[] =
338 0x8E, /* length, prefetch on */
339 0xC8, /* fifo to 8, monitor off */
340 #ifdef CONFIG_VME
341 0xc0, /* don't save bad frames */
342 #else
343 0x80, /* don't save bad frames */
344 #endif
345 0x2E, /* No source address insertion, 8 byte preamble */
346 0x00, /* priority and backoff defaults */
347 0x60, /* interframe spacing */
348 0x00, /* slot time LSB */
349 0xf2, /* slot time and retries */
350 0x00, /* promiscuous mode */
351 0x00, /* collision detect */
352 0x40, /* minimum frame length */
353 0xff,
354 0x00,
355 0x7f /* *multi IA */ };
357 static int i596_open(struct net_device *dev);
358 static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
359 static void i596_interrupt(int irq, void *dev_id, struct pt_regs *regs);
360 static int i596_close(struct net_device *dev);
361 static struct net_device_stats *i596_get_stats(struct net_device *dev);
362 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
363 static void i596_tx_timeout (struct net_device *dev);
364 static void print_eth(unsigned char *buf, char *str);
365 static void set_multicast_list(struct net_device *dev);
367 static int rx_ring_size = RX_RING_SIZE;
368 static int ticks_limit = 25;
369 static int max_cmd_backlog = TX_RING_SIZE-1;
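/* Issue a Channel Attention to the 82596 so that it re-examines the SCB;
 * the mechanism is board specific (register write, read or outw). */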
372 static inline void CA(struct net_device *dev)
374 #ifdef ENABLE_MVME16x_NET
375 if (MACH_IS_MVME16x) {
376 ((struct i596_reg *) dev->base_addr)->ca = 1;
378 #endif
379 #ifdef ENABLE_BVME6000_NET
380 if (MACH_IS_BVME6000) {
381 volatile u32 i;
383 i = *(volatile u32 *) (dev->base_addr);
385 #endif
386 #ifdef ENABLE_APRICOT
387 if (MACH_IS_APRICOT) {
388 outw(0, (short) (dev->base_addr) + 4);
390 #endif
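/* Issue a PORT command (reset, self-test, alternate SCP/dump address) to the
 * 82596, splitting or word-swapping the 32-bit value as the board requires. */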
394 static inline void MPU_PORT(struct net_device *dev, int c, volatile void *x)
396 #ifdef ENABLE_MVME16x_NET
397 if (MACH_IS_MVME16x) {
398 struct i596_reg *p = (struct i596_reg *) (dev->base_addr);
399 p->porthi = ((c) | (u32) (x)) & 0xffff;
400 p->portlo = ((c) | (u32) (x)) >> 16;
402 #endif
403 #ifdef ENABLE_BVME6000_NET
404 if (MACH_IS_BVME6000) {
405 u32 v = (u32) (c) | (u32) (x);
406 v = ((u32) (v) << 16) | ((u32) (v) >> 16);
407 *(volatile u32 *) dev->base_addr = v;
408 udelay(1);
409 *(volatile u32 *) dev->base_addr = v;
411 #endif
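/* Busy-wait (in 10us steps) for the ISCP busy flag to clear; print a
 * diagnostic and return -1 on timeout. */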
415 static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
417 while (--delcnt && lp->iscp.stat)
418 udelay(10);
419 if (!delcnt) {
420 printk("%s: %s, status %4.4x, cmd %4.4x.\n",
421 dev->name, str, lp->scb.status, lp->scb.command);
422 return -1;
424 else
425 return 0;
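/* Busy-wait for the 82596 to accept (clear) the SCB command word; print a
 * diagnostic and return -1 on timeout. */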
429 static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
431 while (--delcnt && lp->scb.command)
432 udelay(10);
433 if (!delcnt) {
434 printk("%s: %s, status %4.4x, cmd %4.4x.\n",
435 dev->name, str, lp->scb.status, lp->scb.command);
436 return -1;
438 else
439 return 0;
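/* Debug dump of the SCP, ISCP, SCB, the pending command list and the rx
 * frame/buffer descriptor rings. */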
443 static void i596_display_data(struct net_device *dev)
445 struct i596_private *lp = (struct i596_private *) dev->priv;
446 struct i596_cmd *cmd;
447 struct i596_rfd *rfd;
448 struct i596_rbd *rbd;
450 printk("lp and scp at %p, .sysbus = %08lx, .iscp = %p\n",
451 &lp->scp, lp->scp.sysbus, lp->scp.iscp);
452 printk("iscp at %p, iscp.stat = %08lx, .scb = %p\n",
453 &lp->iscp, lp->iscp.stat, lp->iscp.scb);
454 printk("scb at %p, scb.status = %04x, .command = %04x,"
455 " .cmd = %p, .rfd = %p\n",
456 &lp->scb, lp->scb.status, lp->scb.command,
457 lp->scb.cmd, lp->scb.rfd);
458 printk(" errors: crc %lx, align %lx, resource %lx,"
459 " over %lx, rcvdt %lx, short %lx\n",
460 lp->scb.crc_err, lp->scb.align_err, lp->scb.resource_err,
461 lp->scb.over_err, lp->scb.rcvdt_err, lp->scb.short_err);
462 cmd = lp->cmd_head;
463 while (cmd != I596_NULL) {
464 printk("cmd at %p, .status = %04x, .command = %04x, .b_next = %p\n",
465 cmd, cmd->status, cmd->command, cmd->b_next);
466 cmd = cmd->v_next;
468 rfd = lp->rfd_head;
469 printk("rfd_head = %p\n", rfd);
470 do {
471 printk (" %p .stat %04x, .cmd %04x, b_next %p, rbd %p,"
472 " count %04x\n",
473 rfd, rfd->stat, rfd->cmd, rfd->b_next, rfd->rbd,
474 rfd->count);
475 rfd = rfd->v_next;
476 } while (rfd != lp->rfd_head);
477 rbd = lp->rbd_head;
478 printk("rbd_head = %p\n", rbd);
479 do {
480 printk(" %p .count %04x, b_next %p, b_data %p, size %04x\n",
481 rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size);
482 rbd = rbd->v_next;
483 } while (rbd != lp->rbd_head);
487 #if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
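/* Error interrupt (VME boards): report the error and dump the driver data
 * structures for debugging. */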
488 static void i596_error(int irq, void *dev_id, struct pt_regs *regs)
490 struct net_device *dev = dev_id;
491 volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
493 pcc2[0x28] = 1;
494 pcc2[0x2b] = 0x1d;
495 printk("%s: Error interrupt\n", dev->name);
496 i596_display_data(dev);
498 #endif
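/* Allocate an sk_buff for every receive buffer descriptor and link the RBD
 * and RFD rings (circular, with the last RFD marked EOL). */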
500 static inline void init_rx_bufs(struct net_device *dev)
502 struct i596_private *lp = (struct i596_private *)dev->priv;
503 int i;
504 struct i596_rfd *rfd;
505 struct i596_rbd *rbd;
507 /* First build the Receive Buffer Descriptor List */
509 for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
510 struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
512 if (skb == NULL)
513 panic("82596: alloc_skb() failed");
514 skb->dev = dev;
515 rbd->v_next = rbd+1;
516 rbd->b_next = WSWAPrbd(virt_to_bus(rbd+1));
517 rbd->b_addr = WSWAPrbd(virt_to_bus(rbd));
518 rbd->skb = skb;
519 rbd->v_data = skb->tail;
520 rbd->b_data = WSWAPchar(virt_to_bus(skb->tail));
521 rbd->size = PKT_BUF_SZ;
522 #ifdef __mc68000__
523 cache_clear(virt_to_phys(skb->tail), PKT_BUF_SZ);
524 #endif
526 lp->rbd_head = lp->rbds;
527 rbd = lp->rbds + rx_ring_size - 1;
528 rbd->v_next = lp->rbds;
529 rbd->b_next = WSWAPrbd(virt_to_bus(lp->rbds));
531 /* Now build the Receive Frame Descriptor List */
533 for (i = 0, rfd = lp->rfds; i < rx_ring_size; i++, rfd++) {
534 rfd->rbd = I596_NULL;
535 rfd->v_next = rfd+1;
536 rfd->v_prev = rfd-1;
537 rfd->b_next = WSWAPrfd(virt_to_bus(rfd+1));
538 rfd->cmd = CMD_FLEX;
540 lp->rfd_head = lp->rfds;
541 lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds));
542 rfd = lp->rfds;
543 rfd->rbd = lp->rbd_head;
544 rfd->v_prev = lp->rfds + rx_ring_size - 1;
545 rfd = lp->rfds + rx_ring_size - 1;
546 rfd->v_next = lp->rfds;
547 rfd->b_next = WSWAPrfd(virt_to_bus(lp->rfds));
548 rfd->cmd = CMD_EOL|CMD_FLEX;
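/* Free the sk_buffs attached to the receive buffer descriptors. */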
551 static inline void remove_rx_bufs(struct net_device *dev)
553 struct i596_private *lp = (struct i596_private *)dev->priv;
554 struct i596_rbd *rbd;
555 int i;
557 for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
558 if (rbd->skb == NULL)
559 break;
560 dev_kfree_skb(rbd->skb);
565 static void rebuild_rx_bufs(struct net_device *dev)
567 struct i596_private *lp = (struct i596_private *) dev->priv;
568 int i;
570 /* Ensure rx frame/buffer descriptors are tidy */
572 for (i = 0; i < rx_ring_size; i++) {
573 lp->rfds[i].rbd = I596_NULL;
574 lp->rfds[i].cmd = CMD_FLEX;
576 lp->rfds[rx_ring_size-1].cmd = CMD_EOL|CMD_FLEX;
577 lp->rfd_head = lp->rfds;
578 lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds));
579 lp->rbd_head = lp->rbds;
580 lp->rfds[0].rbd = WSWAPrbd(virt_to_bus(lp->rbds));
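/* Reset the chip, hand it the SCP address, wait for self-initialisation,
 * queue the Configure, individual-address setup (CmdSASetup) and TDR
 * commands, then start the receive unit.  Returns 0 on success, -1 on
 * timeout. */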
584 static int init_i596_mem(struct net_device *dev)
586 struct i596_private *lp = (struct i596_private *) dev->priv;
587 #if !defined(ENABLE_MVME16x_NET) && !defined(ENABLE_BVME6000_NET)
588 short ioaddr = dev->base_addr;
589 #endif
590 unsigned long flags;
592 MPU_PORT(dev, PORT_RESET, 0);
594 udelay(100); /* Wait 100us - seems to help */
596 #if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
597 #ifdef ENABLE_MVME16x_NET
598 if (MACH_IS_MVME16x) {
599 volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
601 /* Disable all ints for now */
602 pcc2[0x28] = 1;
603 pcc2[0x2a] = 0x48;
604 /* Following disables snooping. Snooping is not required
605 * as we make appropriate use of non-cached pages for
606           * shared data, and cache_push/cache_clear. */
608 pcc2[0x2b] = 0x08;
610 #endif
611 #ifdef ENABLE_BVME6000_NET
612 if (MACH_IS_BVME6000) {
613 volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
615 *ethirq = 1;
617 #endif
619 /* change the scp address */
621 MPU_PORT(dev, PORT_ALTSCP, (void *)virt_to_bus(&lp->scp));
623 #elif defined(ENABLE_APRICOT)
626 u32 scp = virt_to_bus(&lp->scp);
628 /* change the scp address */
629 outw(0, ioaddr);
630 outw(0, ioaddr);
631 outb(4, ioaddr + 0xf);
632 outw(scp | 2, ioaddr);
633 outw(scp >> 16, ioaddr);
635 #endif
637 lp->last_cmd = jiffies;
639 #ifdef ENABLE_MVME16x_NET
640 if (MACH_IS_MVME16x)
641 lp->scp.sysbus = 0x00000054;
642 #endif
643 #ifdef ENABLE_BVME6000_NET
644 if (MACH_IS_BVME6000)
645 lp->scp.sysbus = 0x0000004c;
646 #endif
647 #ifdef ENABLE_APRICOT
648 if (MACH_IS_APRICOT)
649 lp->scp.sysbus = 0x00440000;
650 #endif
652 lp->scp.iscp = WSWAPiscp(virt_to_bus(&(lp->iscp)));
653 lp->iscp.scb = WSWAPscb(virt_to_bus(&(lp->scb)));
654 lp->iscp.stat = ISCP_BUSY;
655 lp->cmd_backlog = 0;
657 lp->cmd_head = lp->scb.cmd = I596_NULL;
659 DEB(DEB_INIT,printk("%s: starting i82596.\n", dev->name));
661 #if defined(ENABLE_APRICOT)
662 (void) inb(ioaddr + 0x10);
663 outb(4, ioaddr + 0xf);
664 #endif
665 CA(dev);
667 if (wait_istat(dev,lp,1000,"initialization timed out"))
668 goto failed;
669 DEB(DEB_INIT,printk("%s: i82596 initialization successful\n", dev->name));
671 /* Ensure rx frame/buffer descriptors are tidy */
672 rebuild_rx_bufs(dev);
673 lp->scb.command = 0;
675 #ifdef ENABLE_MVME16x_NET
676 if (MACH_IS_MVME16x) {
677 volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
679 /* Enable ints, etc. now */
680 pcc2[0x2a] = 0x55; /* Edge sensitive */
681 pcc2[0x2b] = 0x15;
683 #endif
684 #ifdef ENABLE_BVME6000_NET
685 if (MACH_IS_BVME6000) {
686 volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
688 *ethirq = 3;
690 #endif
693 DEB(DEB_INIT,printk("%s: queuing CmdConfigure\n", dev->name));
694 memcpy(lp->cf_cmd.i596_config, init_setup, 14);
695 lp->cf_cmd.cmd.command = CmdConfigure;
696 i596_add_cmd(dev, &lp->cf_cmd.cmd);
698 DEB(DEB_INIT,printk("%s: queuing CmdSASetup\n", dev->name));
699 memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
700 lp->sa_cmd.cmd.command = CmdSASetup;
701 i596_add_cmd(dev, &lp->sa_cmd.cmd);
703 DEB(DEB_INIT,printk("%s: queuing CmdTDR\n", dev->name));
704 lp->tdr_cmd.cmd.command = CmdTDR;
705 i596_add_cmd(dev, &lp->tdr_cmd.cmd);
707 spin_lock_irqsave (&lp->lock, flags);
709 if (wait_cmd(dev,lp,1000,"timed out waiting to issue RX_START"))
710 goto failed;
711 DEB(DEB_INIT,printk("%s: Issuing RX_START\n", dev->name));
712 lp->scb.command = RX_START;
713 CA(dev);
715 spin_unlock_irqrestore (&lp->lock, flags);
717 if (wait_cmd(dev,lp,1000,"RX_START not processed"))
718 goto failed;
719 DEB(DEB_INIT,printk("%s: Receive unit started OK\n", dev->name));
720 return 0;
722 failed:
723 printk("%s: Failed to initialise 82596\n", dev->name);
724 MPU_PORT(dev, PORT_RESET, 0);
725 return -1;
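/* Pass completed receive frames up the stack.  Frames longer than
 * rx_copybreak are handed up in place and their ring buffer is replaced with
 * a fresh sk_buff; shorter frames are copied into a new skb. */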
728 static inline int i596_rx(struct net_device *dev)
730 struct i596_private *lp = (struct i596_private *)dev->priv;
731 struct i596_rfd *rfd;
732 struct i596_rbd *rbd;
733 int frames = 0;
735 DEB(DEB_RXFRAME,printk ("i596_rx(), rfd_head %p, rbd_head %p\n",
736 lp->rfd_head, lp->rbd_head));
738 rfd = lp->rfd_head; /* Ref next frame to check */
740 while ((rfd->stat) & STAT_C) { /* Loop while complete frames */
741 if (rfd->rbd == I596_NULL)
742 rbd = I596_NULL;
743 else if (rfd->rbd == lp->rbd_head->b_addr)
744 rbd = lp->rbd_head;
745 else {
746 printk("%s: rbd chain broken!\n", dev->name);
747 /* XXX Now what? */
748 rbd = I596_NULL;
750 DEB(DEB_RXFRAME, printk(" rfd %p, rfd.rbd %p, rfd.stat %04x\n",
751 rfd, rfd->rbd, rfd->stat));
753 if (rbd != I596_NULL && ((rfd->stat) & STAT_OK)) {
754 /* a good frame */
755 int pkt_len = rbd->count & 0x3fff;
756 struct sk_buff *skb = rbd->skb;
757 int rx_in_place = 0;
759 DEB(DEB_RXADDR,print_eth(rbd->v_data, "received"));
760 frames++;
762 /* Check if the packet is long enough to just accept
763                  * without copying to a properly sized skbuff. */
766 if (pkt_len > rx_copybreak) {
767 struct sk_buff *newskb;
769 /* Get fresh skbuff to replace filled one. */
770 newskb = dev_alloc_skb(PKT_BUF_SZ);
771 if (newskb == NULL) {
772 skb = NULL; /* drop pkt */
773 goto memory_squeeze;
775 /* Pass up the skb already on the Rx ring. */
776 skb_put(skb, pkt_len);
777 rx_in_place = 1;
778 rbd->skb = newskb;
779 newskb->dev = dev;
780 rbd->v_data = newskb->tail;
781 rbd->b_data = WSWAPchar(virt_to_bus(newskb->tail));
782 #ifdef __mc68000__
783 cache_clear(virt_to_phys(newskb->tail), PKT_BUF_SZ);
784 #endif
786 else
787 skb = dev_alloc_skb(pkt_len + 2);
788 memory_squeeze:
789 if (skb == NULL) {
790 /* XXX tulip.c can defer packets here!! */
791 printk ("%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
792 lp->stats.rx_dropped++;
794 else {
795 skb->dev = dev;
796 if (!rx_in_place) {
797 /* 16 byte align the data fields */
798 skb_reserve(skb, 2);
799 memcpy(skb_put(skb,pkt_len), rbd->v_data, pkt_len);
801 skb->protocol=eth_type_trans(skb,dev);
802 skb->len = pkt_len;
803 #ifdef __mc68000__
804 cache_clear(virt_to_phys(rbd->skb->tail),
805 pkt_len);
806 #endif
807 netif_rx(skb);
808 lp->stats.rx_packets++;
809 lp->stats.rx_bytes+=pkt_len;
812 else {
813 DEB(DEB_ERRORS, printk("%s: Error, rfd.stat = 0x%04x\n",
814 dev->name, rfd->stat));
815 lp->stats.rx_errors++;
816 if ((rfd->stat) & 0x0001)
817 lp->stats.collisions++;
818 if ((rfd->stat) & 0x0080)
819 lp->stats.rx_length_errors++;
820 if ((rfd->stat) & 0x0100)
821 lp->stats.rx_over_errors++;
822 if ((rfd->stat) & 0x0200)
823 lp->stats.rx_fifo_errors++;
824 if ((rfd->stat) & 0x0400)
825 lp->stats.rx_frame_errors++;
826 if ((rfd->stat) & 0x0800)
827 lp->stats.rx_crc_errors++;
828 if ((rfd->stat) & 0x1000)
829 lp->stats.rx_length_errors++;
832 /* Clear the buffer descriptor count and EOF + F flags */
834 if (rbd != I596_NULL && (rbd->count & 0x4000)) {
835 rbd->count = 0;
836 lp->rbd_head = rbd->v_next;
839 /* Tidy the frame descriptor, marking it as end of list */
841 rfd->rbd = I596_NULL;
842 rfd->stat = 0;
843 rfd->cmd = CMD_EOL|CMD_FLEX;
844 rfd->count = 0;
846 /* Remove end-of-list from old end descriptor */
848 rfd->v_prev->cmd = CMD_FLEX;
850 /* Update record of next frame descriptor to process */
852 lp->scb.rfd = rfd->b_next;
853 lp->rfd_head = rfd->v_next;
854 rfd = lp->rfd_head;
857 DEB(DEB_RXFRAME,printk ("frames %d\n", frames));
859 return 0;
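/* Drain the pending command list, freeing any queued transmit sk_buffs and
 * marking the commands free. */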
863 static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
865 struct i596_cmd *ptr;
867 while (lp->cmd_head != I596_NULL) {
868 ptr = lp->cmd_head;
869 lp->cmd_head = ptr->v_next;
870 lp->cmd_backlog--;
872 switch ((ptr->command) & 0x7) {
873 case CmdTx:
875 struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
876 struct sk_buff *skb = tx_cmd->skb;
878 dev_kfree_skb(skb);
880 lp->stats.tx_errors++;
881 lp->stats.tx_aborted_errors++;
883 ptr->v_next = ptr->b_next = I596_NULL;
884 tx_cmd->cmd.command = 0; /* Mark as free */
885 break;
887 default:
888 ptr->v_next = ptr->b_next = I596_NULL;
892 wait_cmd(dev,lp,100,"i596_cleanup_cmd timed out");
893 lp->scb.cmd = I596_NULL;
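/* Abort the command and receive units, clean up pending commands and
 * reinitialise the chip. */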
896 static inline void i596_reset(struct net_device *dev, struct i596_private *lp, int ioaddr)
898 unsigned long flags;
900 DEB(DEB_RESET,printk("i596_reset\n"));
902 spin_lock_irqsave (&lp->lock, flags);
904 wait_cmd(dev,lp,100,"i596_reset timed out");
906 netif_stop_queue(dev);
908 lp->scb.command = CUC_ABORT | RX_ABORT;
909 CA(dev);
911 /* wait for shutdown */
912 wait_cmd(dev,lp,1000,"i596_reset 2 timed out");
913 spin_unlock_irqrestore (&lp->lock, flags);
915 i596_cleanup_cmd(dev,lp);
916 i596_rx(dev);
918 netif_start_queue(dev);
919 init_i596_mem(dev);
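/* Append a command to the 82596 command chain, starting the command unit if
 * it was idle.  If the backlog has been outstanding too long, reset the chip. */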
922 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
924 struct i596_private *lp = (struct i596_private *) dev->priv;
925 int ioaddr = dev->base_addr;
926 unsigned long flags;
928 DEB(DEB_ADDCMD,printk("i596_add_cmd\n"));
930 cmd->status = 0;
931 cmd->command |= (CMD_EOL | CMD_INTR);
932 cmd->v_next = cmd->b_next = I596_NULL;
934 spin_lock_irqsave (&lp->lock, flags);
936 if (lp->cmd_head != I596_NULL) {
937 lp->cmd_tail->v_next = cmd;
938 lp->cmd_tail->b_next = WSWAPcmd(virt_to_bus(&cmd->status));
939 } else {
940 lp->cmd_head = cmd;
941 wait_cmd(dev,lp,100,"i596_add_cmd timed out");
942 lp->scb.cmd = WSWAPcmd(virt_to_bus(&cmd->status));
943 lp->scb.command = CUC_START;
944 CA(dev);
946 lp->cmd_tail = cmd;
947 lp->cmd_backlog++;
949 spin_unlock_irqrestore (&lp->lock, flags);
951 if (lp->cmd_backlog > max_cmd_backlog) {
952 unsigned long tickssofar = jiffies - lp->last_cmd;
954 if (tickssofar < ticks_limit)
955 return;
957 printk("%s: command unit timed out, status resetting.\n", dev->name);
959 i596_reset(dev, lp, ioaddr);
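/* Open the interface: claim the interrupt(s), allocate the receive buffers
 * and initialise the chip. */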
963 static int i596_open(struct net_device *dev)
965 int res = 0;
967 DEB(DEB_OPEN,printk("%s: i596_open() irq %d.\n", dev->name, dev->irq));
969 if (request_irq(dev->irq, &i596_interrupt, 0, "i82596", dev)) {
970 printk("%s: IRQ %d not free\n", dev->name, dev->irq);
971 return -EAGAIN;
973 #ifdef ENABLE_MVME16x_NET
974 if (MACH_IS_MVME16x) {
975 if (request_irq(0x56, &i596_error, 0, "i82596_error", dev))
976 return -EAGAIN;
978 #endif
979 init_rx_bufs(dev);
981 netif_start_queue(dev);
983 MOD_INC_USE_COUNT;
985 /* Initialize the 82596 memory */
986 if (init_i596_mem(dev)) {
987 res = -EAGAIN;
988 free_irq(dev->irq, dev);
991 return res;
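/* Transmit watchdog: kick the command/receive units, or do a full reset if
 * a kick did not help last time. */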
994 static void i596_tx_timeout (struct net_device *dev)
996 struct i596_private *lp = (struct i596_private *) dev->priv;
997 int ioaddr = dev->base_addr;
999 /* Transmitter timeout, serious problems. */
1000 DEB(DEB_ERRORS,printk("%s: transmit timed out, status resetting.\n",
1001 dev->name));
1003 lp->stats.tx_errors++;
1005 /* Try to restart the adaptor */
1006 if (lp->last_restart == lp->stats.tx_packets) {
1007 DEB(DEB_ERRORS,printk ("Resetting board.\n"));
1008 /* Shutdown and restart */
1009 i596_reset (dev, lp, ioaddr);
1010 } else {
1011 /* Issue a channel attention signal */
1012 DEB(DEB_ERRORS,printk ("Kicking board.\n"));
1013 lp->scb.command = CUC_START | RX_START;
1014 CA (dev);
1015 lp->last_restart = lp->stats.tx_packets;
1018 dev->trans_start = jiffies;
1019 netif_wake_queue (dev);
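/* Queue a frame for transmission: fill in the next tx command and buffer
 * descriptor and add the command to the chain.  The frame is dropped if the
 * tx ring is full. */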
1023 static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
1025 struct i596_private *lp = (struct i596_private *) dev->priv;
1026 struct tx_cmd *tx_cmd;
1027 struct i596_tbd *tbd;
1028 short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
1029 dev->trans_start = jiffies;
1031 DEB(DEB_STARTTX,printk("%s: i596_start_xmit(%x,%x) called\n", dev->name,
1032 skb->len, (unsigned int)skb->data));
1034 netif_stop_queue(dev);
1036 tx_cmd = lp->tx_cmds + lp->next_tx_cmd;
1037 tbd = lp->tbds + lp->next_tx_cmd;
1039 if (tx_cmd->cmd.command) {
1040 DEB(DEB_ERRORS,printk ("%s: xmit ring full, dropping packet.\n",
1041 dev->name));
1042 lp->stats.tx_dropped++;
1044 dev_kfree_skb(skb);
1045 } else {
1046 if (++lp->next_tx_cmd == TX_RING_SIZE)
1047 lp->next_tx_cmd = 0;
1048 tx_cmd->tbd = WSWAPtbd(virt_to_bus(tbd));
1049 tbd->next = I596_NULL;
1051 tx_cmd->cmd.command = CMD_FLEX | CmdTx;
1052 tx_cmd->skb = skb;
1054 tx_cmd->pad = 0;
1055 tx_cmd->size = 0;
1056 tbd->pad = 0;
1057 tbd->size = EOF | length;
1059 tbd->data = WSWAPchar(virt_to_bus(skb->data));
1061 #ifdef __mc68000__
1062 cache_push(virt_to_phys(skb->data), length);
1063 #endif
1064 DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued"));
1065 i596_add_cmd(dev, &tx_cmd->cmd);
1067 lp->stats.tx_packets++;
1068 lp->stats.tx_bytes += length;
1071 netif_start_queue(dev);
1073 return 0;
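/* Debug helper: print an Ethernet header as source --> destination, followed
 * by the type/length field. */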
1076 static void print_eth(unsigned char *add, char *str)
1078 int i;
1080 printk("i596 0x%p, ", add);
1081 for (i = 0; i < 6; i++)
1082 printk(" %02X", add[i + 6]);
1083 printk(" -->");
1084 for (i = 0; i < 6; i++)
1085 printk(" %02X", add[i]);
1086 printk(" %02X%02X, %s\n", add[12], add[13], str);
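/* Probe for a supported 82596 interface (MVME16x, BVME6000 or Apricot), read
 * the MAC address, claim I/O and memory resources and fill in the net_device
 * entry points. */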
1089 int __init i82596_probe(struct net_device *dev)
1091 int i;
1092 struct i596_private *lp;
1093 char eth_addr[6];
1094 static int probed = 0;
1096 if (probed)
1097 return -ENODEV;
1098 probed++;
1099 #ifdef ENABLE_MVME16x_NET
1100 if (MACH_IS_MVME16x) {
1101 if (mvme16x_config & MVME16x_CONFIG_NO_ETHERNET) {
1102 printk("Ethernet probe disabled - chip not present\n");
1103 return -ENODEV;
1105 memcpy(eth_addr, (void *) 0xfffc1f2c, 6); /* YUCK! Get addr from NOVRAM */
1106 dev->base_addr = MVME_I596_BASE;
1107 dev->irq = (unsigned) MVME16x_IRQ_I596;
1109 #endif
1110 #ifdef ENABLE_BVME6000_NET
1111 if (MACH_IS_BVME6000) {
1112 volatile unsigned char *rtc = (unsigned char *) BVME_RTC_BASE;
1113 unsigned char msr = rtc[3];
1114 int i;
1116 rtc[3] |= 0x80;
1117 for (i = 0; i < 6; i++)
1118 eth_addr[i] = rtc[i * 4 + 7]; /* Stored in RTC RAM at offset 1 */
1119 rtc[3] = msr;
1120 dev->base_addr = BVME_I596_BASE;
1121 dev->irq = (unsigned) BVME_IRQ_I596;
1123 #endif
1124 #ifdef ENABLE_APRICOT
1126 int checksum = 0;
1127 int ioaddr = 0x300;
1129         /* this is easy: the ethernet interface can only be at 0x300 */
1130 /* first check nothing is already registered here */
1132 if (check_region(ioaddr, I596_TOTAL_SIZE)) {
1133 printk("82596: IO address 0x%04x in use\n", ioaddr);
1134 return -ENODEV;
1137 for (i = 0; i < 8; i++) {
1138 eth_addr[i] = inb(ioaddr + 8 + i);
1139 checksum += eth_addr[i];
1142         /* checksum is a multiple of 0x100; got this wrong the first time.
1143            Some machines have 0x100, some 0x200.  The DOS driver doesn't
1144            even bother with the checksum */
1146 if (checksum % 0x100)
1147 return -ENODEV;
1149 /* Some other boards trip the checksum.. but then appear as
1150 * ether address 0. Trap these - AC */
1152 if (memcmp(eth_addr, "\x00\x00\x49", 3) != 0)
1153 return -ENODEV;
1155 if (!request_region(ioaddr, I596_TOTAL_SIZE, "i596"))
1156 return -ENODEV;
1158 dev->base_addr = ioaddr;
1159 dev->irq = 10;
1161 #endif
1162 dev->mem_start = (int)__get_free_pages(GFP_ATOMIC, 0);
1163 if (!dev->mem_start) {
1164 #ifdef ENABLE_APRICOT
1165 release_region(dev->base_addr, I596_TOTAL_SIZE);
1166 #endif
1167 return -ENOMEM;
1170 ether_setup(dev);
1171 DEB(DEB_PROBE,printk("%s: 82596 at %#3lx,", dev->name, dev->base_addr));
1173 for (i = 0; i < 6; i++)
1174 DEB(DEB_PROBE,printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]));
1176 DEB(DEB_PROBE,printk(" IRQ %d.\n", dev->irq));
1178 DEB(DEB_PROBE,printk(version));
1180 /* The 82596-specific entries in the device structure. */
1181 dev->open = i596_open;
1182 dev->stop = i596_close;
1183 dev->hard_start_xmit = i596_start_xmit;
1184 dev->get_stats = i596_get_stats;
1185 dev->set_multicast_list = set_multicast_list;
1186 dev->tx_timeout = i596_tx_timeout;
1187 dev->watchdog_timeo = TX_TIMEOUT;
1189 dev->priv = (void *)(dev->mem_start);
1191 lp = (struct i596_private *) dev->priv;
1192 DEB(DEB_INIT,printk ("%s: lp at 0x%08lx (%d bytes), lp->scb at 0x%08lx\n",
1193 dev->name, (unsigned long)lp,
1194 sizeof(struct i596_private), (unsigned long)&lp->scb));
1195 memset((void *) lp, 0, sizeof(struct i596_private));
1197 #ifdef __mc68000__
1198 cache_push(virt_to_phys((void *)(dev->mem_start)), 4096);
1199 cache_clear(virt_to_phys((void *)(dev->mem_start)), 4096);
1200 kernel_set_cachemode((void *)(dev->mem_start), 4096, IOMAP_NOCACHE_SER);
1201 #endif
1202 lp->scb.command = 0;
1203 lp->scb.cmd = I596_NULL;
1204 lp->scb.rfd = I596_NULL;
1205 lp->lock = SPIN_LOCK_UNLOCKED;
1207 return 0;
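/* Interrupt handler: reap completed commands (freeing tx skbs, recording TDR
 * results), process received frames, restart the command/receive units if
 * necessary and acknowledge the interrupt at the board. */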
1210 static void i596_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1212 struct net_device *dev = dev_id;
1213 struct i596_private *lp;
1214 short ioaddr;
1215 unsigned short status, ack_cmd = 0;
1217 #ifdef ENABLE_BVME6000_NET
1218 if (MACH_IS_BVME6000) {
1219 if (*(char *) BVME_LOCAL_IRQ_STAT & BVME_ETHERR) {
1220 i596_error(BVME_IRQ_I596, NULL, NULL);
1221 return;
1224 #endif
1225 if (dev == NULL) {
1226 printk("i596_interrupt(): irq %d for unknown device.\n", irq);
1227 return;
1230 ioaddr = dev->base_addr;
1231 lp = (struct i596_private *) dev->priv;
1233 spin_lock (&lp->lock);
1235 wait_cmd(dev,lp,100,"i596 interrupt, timeout");
1236 status = lp->scb.status;
1238 DEB(DEB_INTS,printk("%s: i596 interrupt, IRQ %d, status %4.4x.\n",
1239 dev->name, irq, status));
1241 ack_cmd = status & 0xf000;
1243 if ((status & 0x8000) || (status & 0x2000)) {
1244 struct i596_cmd *ptr;
1246 if ((status & 0x8000))
1247 DEB(DEB_INTS,printk("%s: i596 interrupt completed command.\n", dev->name));
1248 if ((status & 0x2000))
1249 DEB(DEB_INTS,printk("%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));
1251 while ((lp->cmd_head != I596_NULL) && (lp->cmd_head->status & STAT_C)) {
1252 ptr = lp->cmd_head;
1254 DEB(DEB_STATUS,printk("cmd_head->status = %04x, ->command = %04x\n",
1255 lp->cmd_head->status, lp->cmd_head->command));
1256 lp->cmd_head = ptr->v_next;
1257 lp->cmd_backlog--;
1259 switch ((ptr->command) & 0x7) {
1260 case CmdTx:
1262 struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
1263 struct sk_buff *skb = tx_cmd->skb;
1265 if ((ptr->status) & STAT_OK) {
1266 DEB(DEB_TXADDR,print_eth(skb->data, "tx-done"));
1267 } else {
1268 lp->stats.tx_errors++;
1269 if ((ptr->status) & 0x0020)
1270 lp->stats.collisions++;
1271 if (!((ptr->status) & 0x0040))
1272 lp->stats.tx_heartbeat_errors++;
1273 if ((ptr->status) & 0x0400)
1274 lp->stats.tx_carrier_errors++;
1275 if ((ptr->status) & 0x0800)
1276 lp->stats.collisions++;
1277 if ((ptr->status) & 0x1000)
1278 lp->stats.tx_aborted_errors++;
1281 dev_kfree_skb_irq(skb);
1283 tx_cmd->cmd.command = 0; /* Mark free */
1284 break;
1286 case CmdTDR:
1288 unsigned short status = ((struct tdr_cmd *)ptr)->status;
1290 if (status & 0x8000) {
1291 DEB(DEB_ANY,printk("%s: link ok.\n", dev->name));
1292 } else {
1293 if (status & 0x4000)
1294 printk("%s: Transceiver problem.\n", dev->name);
1295 if (status & 0x2000)
1296 printk("%s: Termination problem.\n", dev->name);
1297 if (status & 0x1000)
1298 printk("%s: Short circuit.\n", dev->name);
1300 DEB(DEB_TDR,printk("%s: Time %d.\n", dev->name, status & 0x07ff));
1302 break;
1304 case CmdConfigure:
1305 /* Zap command so set_multicast_list() knows it is free */
1306 ptr->command = 0;
1307 break;
1309 ptr->v_next = ptr->b_next = I596_NULL;
1310 lp->last_cmd = jiffies;
1313 ptr = lp->cmd_head;
1314 while ((ptr != I596_NULL) && (ptr != lp->cmd_tail)) {
1315 ptr->command &= 0x1fff;
1316 ptr = ptr->v_next;
1319 if ((lp->cmd_head != I596_NULL))
1320 ack_cmd |= CUC_START;
1321 lp->scb.cmd = WSWAPcmd(virt_to_bus(&lp->cmd_head->status));
1323 if ((status & 0x1000) || (status & 0x4000)) {
1324 if ((status & 0x4000))
1325 DEB(DEB_INTS,printk("%s: i596 interrupt received a frame.\n", dev->name));
1326 i596_rx(dev);
1327 /* Only RX_START if stopped - RGH 07-07-96 */
1328 if (status & 0x1000) {
1329 if (netif_running(dev)) {
1330 DEB(DEB_ERRORS,printk("%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status));
1331 ack_cmd |= RX_START;
1332 lp->stats.rx_errors++;
1333 lp->stats.rx_fifo_errors++;
1334 rebuild_rx_bufs(dev);
1338 wait_cmd(dev,lp,100,"i596 interrupt, timeout");
1339 lp->scb.command = ack_cmd;
1341 #ifdef ENABLE_MVME16x_NET
1342 if (MACH_IS_MVME16x) {
1343 /* Ack the interrupt */
1345 volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
1347 pcc2[0x2a] |= 0x08;
1349 #endif
1350 #ifdef ENABLE_BVME6000_NET
1351 if (MACH_IS_BVME6000) {
1352 volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
1354 *ethirq = 1;
1355 *ethirq = 3;
1357 #endif
1358 #ifdef ENABLE_APRICOT
1359 (void) inb(ioaddr + 0x10);
1360 outb(4, ioaddr + 0xf);
1361 #endif
1362 CA(dev);
1364 DEB(DEB_INTS,printk("%s: exiting interrupt.\n", dev->name));
1366 spin_unlock (&lp->lock);
1367 return;
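/* Close the interface: stop the queue, abort the chip, disable board
 * interrupts and release the IRQ and receive buffers. */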
1370 static int i596_close(struct net_device *dev)
1372 struct i596_private *lp = (struct i596_private *) dev->priv;
1373 unsigned long flags;
1375 netif_stop_queue(dev);
1377 DEB(DEB_INIT,printk("%s: Shutting down ethercard, status was %4.4x.\n",
1378 dev->name, lp->scb.status));
1380 save_flags(flags);
1381 cli();
1383 wait_cmd(dev,lp,100,"close1 timed out");
1384 lp->scb.command = CUC_ABORT | RX_ABORT;
1385 CA(dev);
1387 wait_cmd(dev,lp,100,"close2 timed out");
1388 restore_flags(flags);
1389 DEB(DEB_STRUCT,i596_display_data(dev));
1390 i596_cleanup_cmd(dev,lp);
1392 #ifdef ENABLE_MVME16x_NET
1393 if (MACH_IS_MVME16x) {
1394 volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
1396 /* Disable all ints */
1397 pcc2[0x28] = 1;
1398 pcc2[0x2a] = 0x40;
1399 pcc2[0x2b] = 0x40; /* Set snooping bits now! */
1401 #endif
1402 #ifdef ENABLE_BVME6000_NET
1403 if (MACH_IS_BVME6000) {
1404 volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
1406 *ethirq = 1;
1408 #endif
1410 free_irq(dev->irq, dev);
1411 remove_rx_bufs(dev);
1412 MOD_DEC_USE_COUNT;
1414 return 0;
1417 static struct net_device_stats *
1418 i596_get_stats(struct net_device *dev)
1420 struct i596_private *lp = (struct i596_private *) dev->priv;
1422 return &lp->stats;
1426 * Set or clear the multicast filter for this adaptor.
1429 static void set_multicast_list(struct net_device *dev)
1431 struct i596_private *lp = (struct i596_private *) dev->priv;
1432 int config = 0, cnt;
1434 DEB(DEB_MULTI,printk("%s: set multicast list, %d entries, promisc %s, allmulti %s\n", dev->name, dev->mc_count, dev->flags & IFF_PROMISC ? "ON" : "OFF", dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
1436 if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) {
1437 lp->cf_cmd.i596_config[8] |= 0x01;
1438 config = 1;
1440 if (!(dev->flags & IFF_PROMISC) && (lp->cf_cmd.i596_config[8] & 0x01)) {
1441 lp->cf_cmd.i596_config[8] &= ~0x01;
1442 config = 1;
1444 if ((dev->flags & IFF_ALLMULTI) && (lp->cf_cmd.i596_config[11] & 0x20)) {
1445 lp->cf_cmd.i596_config[11] &= ~0x20;
1446 config = 1;
1448 if (!(dev->flags & IFF_ALLMULTI) && !(lp->cf_cmd.i596_config[11] & 0x20)) {
1449 lp->cf_cmd.i596_config[11] |= 0x20;
1450 config = 1;
1452 if (config) {
1453 if (lp->cf_cmd.cmd.command)
1454 printk("%s: config change request already queued\n",
1455 dev->name);
1456 else {
1457 lp->cf_cmd.cmd.command = CmdConfigure;
1458 i596_add_cmd(dev, &lp->cf_cmd.cmd);
1462 cnt = dev->mc_count;
1463 if (cnt > MAX_MC_CNT)
1465 cnt = MAX_MC_CNT;
1466 printk("%s: Only %d multicast addresses supported",
1467 dev->name, cnt);
1470 if (dev->mc_count > 0) {
1471 struct dev_mc_list *dmi;
1472 unsigned char *cp;
1473 struct mc_cmd *cmd;
1475 cmd = &lp->mc_cmd;
1476 cmd->cmd.command = CmdMulticastList;
1477 cmd->mc_cnt = dev->mc_count * 6;
1478 cp = cmd->mc_addrs;
1479 for (dmi = dev->mc_list; cnt && dmi != NULL; dmi = dmi->next, cnt--, cp += 6) {
1480 memcpy(cp, dmi->dmi_addr, 6);
1481 if (i596_debug > 1)
1482 DEB(DEB_MULTI,printk("%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n",
1483 dev->name, cp[0],cp[1],cp[2],cp[3],cp[4],cp[5]));
1485 i596_add_cmd(dev, &cmd->cmd);
1489 #ifdef HAVE_DEVLIST
1490 static unsigned int i596_portlist[] __initdata =
1491 {0x300, 0};
1492 struct netdev_entry i596_drv =
1493 {"i82596", i82596_probe, I596_TOTAL_SIZE, i596_portlist};
1494 #endif
1496 #ifdef MODULE
1497 static struct net_device dev_82596 =
1499 "", /* device name inserted by drivers/net/net_init.c */
1500 0, 0, 0, 0,
1501 0, 0, /* base, irq */
1502 0, 0, 0, NULL, i82596_probe};
1504 #ifdef ENABLE_APRICOT
1505 static int io = 0x300;
1506 static int irq = 10;
1507 MODULE_PARM(irq, "i");
1508 #endif
1510 MODULE_PARM(debug, "i");
1511 static int debug = -1;
1513 int init_module(void)
1515 #ifdef ENABLE_APRICOT
1516 dev_82596.base_addr = io;
1517 dev_82596.irq = irq;
1518 #endif
1519 if (debug >= 0)
1520 i596_debug = debug;
1521 if (register_netdev(&dev_82596) != 0)
1522 return -EIO;
1523 return 0;
1526 void cleanup_module(void)
1528 unregister_netdev(&dev_82596);
1529 #ifdef __mc68000__
1530 /* XXX This assumes default cache mode to be IOMAP_FULL_CACHING,
1531  * XXX which may be invalid (CONFIG_060_WRITETHROUGH) */
1534 kernel_set_cachemode((void *)(dev_82596.mem_start), 4096,
1535 IOMAP_FULL_CACHING);
1536 #endif
1537 free_page ((u32)(dev_82596.mem_start));
1538 dev_82596.priv = NULL;
1539 #ifdef ENABLE_APRICOT
1540 /* If we don't do this, we can't re-insmod it later. */
1541 release_region(dev_82596.base_addr, I596_TOTAL_SIZE);
1542 #endif
1545 #endif /* MODULE */
1548 /* Local variables:
1549  * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c 82596.c"
1550  * End: */