Ok. I didn't make 2.4.0 in 2000. Tough. I tried, but we had some
[davej-history.git] / drivers / net / 82596.c
blobeb3253b822b5e351dc46f8237925580610f8cac3
1 /* 82596.c: A generic 82596 ethernet driver for linux. */
2 /*
3 Based on Apricot.c
4 Written 1994 by Mark Evans.
5 This driver is for the Apricot 82596 bus-master interface
7 Modularised 12/94 Mark Evans
10 Modified to support the 82596 ethernet chips on 680x0 VME boards.
11 by Richard Hirst <richard@sleepie.demon.co.uk>
12 Renamed to be 82596.c
14 980825: Changed to receive directly in to sk_buffs which are
15 allocated at open() time. Eliminates copy on incoming frames
16 (small ones are still copied). Shared data now held in a
17 non-cached page, so we can run on 68060 in copyback mode.
19 TBD:
20 * look at deferring rx frames rather than discarding (as per tulip)
21 * handle tx ring full as per tulip
22 * performance test to tune rx_copybreak
24 Most of my modifications relate to the braindead big-endian
25 implementation by Intel. When the i596 is operating in
26 'big-endian' mode, it thinks a 32 bit value of 0x12345678
27 should be stored as 0x56781234. This is a real pain, when
28 you have linked lists which are shared by the 680x0 and the
29 i596.
31 Driver skeleton
32 Written 1993 by Donald Becker.
33 Copyright 1993 United States Government as represented by the Director,
34 National Security Agency. This software may only be used and distributed
35 according to the terms of the GNU Public License as modified by SRC,
36 incorporated herein by reference.
38 The author may be reached as becker@super.org or
39 C/O Supercomputing Research Ctr., 17100 Science Dr., Bowie MD 20715
43 static const char *version = "82596.c $Revision: 1.4 $\n";
45 #include <linux/config.h>
46 #include <linux/module.h>
48 #include <linux/kernel.h>
49 #include <linux/sched.h>
50 #include <linux/string.h>
51 #include <linux/ptrace.h>
52 #include <linux/errno.h>
53 #include <linux/ioport.h>
54 #include <linux/malloc.h>
55 #include <linux/interrupt.h>
56 #include <linux/delay.h>
57 #include <linux/netdevice.h>
58 #include <linux/etherdevice.h>
59 #include <linux/skbuff.h>
60 #include <linux/init.h>
62 #include <asm/bitops.h>
63 #include <asm/io.h>
64 #include <asm/dma.h>
65 #include <asm/pgtable.h>
66 #include <asm/pgalloc.h>
68 /* DEBUG flags
71 #define DEB_INIT 0x0001
72 #define DEB_PROBE 0x0002
73 #define DEB_SERIOUS 0x0004
74 #define DEB_ERRORS 0x0008
75 #define DEB_MULTI 0x0010
76 #define DEB_TDR 0x0020
77 #define DEB_OPEN 0x0040
78 #define DEB_RESET 0x0080
79 #define DEB_ADDCMD 0x0100
80 #define DEB_STATUS 0x0200
81 #define DEB_STARTTX 0x0400
82 #define DEB_RXADDR 0x0800
83 #define DEB_TXADDR 0x1000
84 #define DEB_RXFRAME 0x2000
85 #define DEB_INTS 0x4000
86 #define DEB_STRUCT 0x8000
87 #define DEB_ANY 0xffff
90 #define DEB(x,y) if (i596_debug & (x)) y
93 #if defined(CONFIG_MVME16x_NET) || defined(CONFIG_MVME16x_NET_MODULE)
94 #define ENABLE_MVME16x_NET
95 #endif
96 #if defined(CONFIG_BVME6000_NET) || defined(CONFIG_BVME6000_NET_MODULE)
97 #define ENABLE_BVME6000_NET
98 #endif
99 #if defined(CONFIG_APRICOT) || defined(CONFIG_APRICOT_MODULE)
100 #define ENABLE_APRICOT
101 #endif
103 #ifdef ENABLE_MVME16x_NET
104 #include <asm/mvme16xhw.h>
105 #endif
106 #ifdef ENABLE_BVME6000_NET
107 #include <asm/bvme6000hw.h>
108 #endif
111 * Define various macros for Channel Attention, word swapping etc., dependent
112 * on architecture. MVME and BVME are 680x0 based, otherwise it is Intel.
115 #ifdef __mc68000__
116 #define WSWAPrfd(x) ((struct i596_rfd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
117 #define WSWAPrbd(x) ((struct i596_rbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
118 #define WSWAPiscp(x) ((struct i596_iscp *)(((u32)(x)<<16) | ((((u32)(x)))>>16)))
119 #define WSWAPscb(x) ((struct i596_scb *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
120 #define WSWAPcmd(x) ((struct i596_cmd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
121 #define WSWAPtbd(x) ((struct i596_tbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
122 #define WSWAPchar(x) ((char *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
123 #define ISCP_BUSY 0x00010000
124 #define MACH_IS_APRICOT 0
125 #else
126 #define WSWAPrfd(x) ((struct i596_rfd *)(x))
127 #define WSWAPrbd(x) ((struct i596_rbd *)(x))
128 #define WSWAPiscp(x) ((struct i596_iscp *)(x))
129 #define WSWAPscb(x) ((struct i596_scb *)(x))
130 #define WSWAPcmd(x) ((struct i596_cmd *)(x))
131 #define WSWAPtbd(x) ((struct i596_tbd *)(x))
132 #define WSWAPchar(x) ((char *)(x))
133 #define ISCP_BUSY 0x0001
134 #define MACH_IS_APRICOT 1
135 #endif
138 * The MPU_PORT command allows direct access to the 82596. With PORT access
139 * the following commands are available (p5-18). The 32-bit port command
140 * must be word-swapped with the most significant word written first.
141 * This only applies to VME boards.
143 #define PORT_RESET 0x00 /* reset 82596 */
144 #define PORT_SELFTEST 0x01 /* selftest */
145 #define PORT_ALTSCP 0x02 /* alternate SCB address */
146 #define PORT_ALTDUMP 0x03 /* Alternate DUMP address */
148 static int i596_debug = (DEB_SERIOUS|DEB_PROBE);
150 MODULE_AUTHOR("Richard Hirst");
151 MODULE_DESCRIPTION("i82596 driver");
152 MODULE_PARM(i596_debug, "i");
155 /* Copy frames shorter than rx_copybreak, otherwise pass on up in
156 * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
158 static int rx_copybreak = 100;
160 #define PKT_BUF_SZ 1536
161 #define MAX_MC_CNT 64
163 #define I596_TOTAL_SIZE 17
165 #define I596_NULL ((void *)0xffffffff)
167 #define CMD_EOL 0x8000 /* The last command of the list, stop. */
168 #define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
169 #define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
171 #define CMD_FLEX 0x0008 /* Enable flexible memory model */
173 enum commands {
174 CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
175 CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
178 #define STAT_C 0x8000 /* Set to 0 after execution */
179 #define STAT_B 0x4000 /* Command being executed */
180 #define STAT_OK 0x2000 /* Command executed ok */
181 #define STAT_A 0x1000 /* Command aborted */
183 #define CUC_START 0x0100
184 #define CUC_RESUME 0x0200
185 #define CUC_SUSPEND 0x0300
186 #define CUC_ABORT 0x0400
187 #define RX_START 0x0010
188 #define RX_RESUME 0x0020
189 #define RX_SUSPEND 0x0030
190 #define RX_ABORT 0x0040
192 #define TX_TIMEOUT 5
195 struct i596_reg {
196 unsigned short porthi;
197 unsigned short portlo;
198 unsigned long ca;
201 #define EOF 0x8000
202 #define SIZE_MASK 0x3fff
204 struct i596_tbd {
205 unsigned short size;
206 unsigned short pad;
207 struct i596_tbd *next;
208 char *data;
211 /* The command structure has two 'next' pointers; v_next is the address of
212 * the next command as seen by the CPU, b_next is the address of the next
213 * command as seen by the 82596. The b_next pointer, as used by the 82596
214 * always references the status field of the next command, rather than the
215 * v_next field, because the 82596 is unaware of v_next. It may seem more
216 * logical to put v_next at the end of the structure, but we cannot do that
217 * because the 82596 expects other fields to be there, depending on command
218 * type.
221 struct i596_cmd {
222 struct i596_cmd *v_next; /* Address from CPUs viewpoint */
223 unsigned short status;
224 unsigned short command;
225 struct i596_cmd *b_next; /* Address from i596 viewpoint */
228 struct tx_cmd {
229 struct i596_cmd cmd;
230 struct i596_tbd *tbd;
231 unsigned short size;
232 unsigned short pad;
233 struct sk_buff *skb; /* So we can free it after tx */
236 struct tdr_cmd {
237 struct i596_cmd cmd;
238 unsigned short status;
239 unsigned short pad;
242 struct mc_cmd {
243 struct i596_cmd cmd;
244 short mc_cnt;
245 char mc_addrs[MAX_MC_CNT*6];
248 struct sa_cmd {
249 struct i596_cmd cmd;
250 char eth_addr[8];
253 struct cf_cmd {
254 struct i596_cmd cmd;
255 char i596_config[16];
258 struct i596_rfd {
259 unsigned short stat;
260 unsigned short cmd;
261 struct i596_rfd *b_next; /* Address from i596 viewpoint */
262 struct i596_rbd *rbd;
263 unsigned short count;
264 unsigned short size;
265 struct i596_rfd *v_next; /* Address from CPUs viewpoint */
266 struct i596_rfd *v_prev;
269 struct i596_rbd {
270 unsigned short count;
271 unsigned short zero1;
272 struct i596_rbd *b_next;
273 unsigned char *b_data; /* Address from i596 viewpoint */
274 unsigned short size;
275 unsigned short zero2;
276 struct sk_buff *skb;
277 struct i596_rbd *v_next;
278 struct i596_rbd *b_addr; /* This rbd addr from i596 view */
279 unsigned char *v_data; /* Address from CPUs viewpoint */
282 #define TX_RING_SIZE 64
283 #define RX_RING_SIZE 16
285 struct i596_scb {
286 unsigned short status;
287 unsigned short command;
288 struct i596_cmd *cmd;
289 struct i596_rfd *rfd;
290 unsigned long crc_err;
291 unsigned long align_err;
292 unsigned long resource_err;
293 unsigned long over_err;
294 unsigned long rcvdt_err;
295 unsigned long short_err;
296 unsigned short t_on;
297 unsigned short t_off;
300 struct i596_iscp {
301 unsigned long stat;
302 struct i596_scb *scb;
305 struct i596_scp {
306 unsigned long sysbus;
307 unsigned long pad;
308 struct i596_iscp *iscp;
311 struct i596_private {
312 volatile struct i596_scp scp;
313 volatile struct i596_iscp iscp;
314 volatile struct i596_scb scb;
315 struct sa_cmd sa_cmd;
316 struct cf_cmd cf_cmd;
317 struct tdr_cmd tdr_cmd;
318 struct mc_cmd mc_cmd;
319 unsigned long stat;
320 int last_restart __attribute__((aligned(4)));
321 struct i596_rfd *rfd_head;
322 struct i596_rbd *rbd_head;
323 struct i596_cmd *cmd_tail;
324 struct i596_cmd *cmd_head;
325 int cmd_backlog;
326 unsigned long last_cmd;
327 struct net_device_stats stats;
328 struct i596_rfd rfds[RX_RING_SIZE];
329 struct i596_rbd rbds[RX_RING_SIZE];
330 struct tx_cmd tx_cmds[TX_RING_SIZE];
331 struct i596_tbd tbds[TX_RING_SIZE];
332 int next_tx_cmd;
333 spinlock_t lock;
336 char init_setup[] =
338 0x8E, /* length, prefetch on */
339 0xC8, /* fifo to 8, monitor off */
340 #ifdef CONFIG_VME
341 0xc0, /* don't save bad frames */
342 #else
343 0x80, /* don't save bad frames */
344 #endif
345 0x2E, /* No source address insertion, 8 byte preamble */
346 0x00, /* priority and backoff defaults */
347 0x60, /* interframe spacing */
348 0x00, /* slot time LSB */
349 0xf2, /* slot time and retries */
350 0x00, /* promiscuous mode */
351 0x00, /* collision detect */
352 0x40, /* minimum frame length */
353 0xff,
354 0x00,
355 0x7f /* *multi IA */ };
357 static int i596_open(struct net_device *dev);
358 static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
359 static void i596_interrupt(int irq, void *dev_id, struct pt_regs *regs);
360 static int i596_close(struct net_device *dev);
361 static struct net_device_stats *i596_get_stats(struct net_device *dev);
362 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
363 static void i596_tx_timeout (struct net_device *dev);
364 static void print_eth(unsigned char *buf, char *str);
365 static void set_multicast_list(struct net_device *dev);
367 static int rx_ring_size = RX_RING_SIZE;
368 static int ticks_limit = 25;
369 static int max_cmd_backlog = TX_RING_SIZE-1;
/* Assert Channel Attention: tell the 82596 to look at the SCB.
 * The mechanism is board-specific - a register write on MVME16x,
 * a read of the base address on BVME6000, an I/O port write on Apricot.
 */
static inline void CA(struct net_device *dev)
{
#ifdef ENABLE_MVME16x_NET
    if (MACH_IS_MVME16x) {
        /* CA is a dedicated register on the MVME i596 interface */
        ((struct i596_reg *) dev->base_addr)->ca = 1;
    }
#endif
#ifdef ENABLE_BVME6000_NET
    if (MACH_IS_BVME6000) {
        volatile u32 i;

        /* A read of the chip's base address acts as the CA strobe;
         * the value itself is discarded.
         */
        i = *(volatile u32 *) (dev->base_addr);
    }
#endif
#ifdef ENABLE_APRICOT
    if (MACH_IS_APRICOT) {
        /* CA is triggered by writing the port at base + 4 */
        outw(0, (short) (dev->base_addr) + 4);
    }
#endif
}
/* Issue a PORT command (PORT_RESET / PORT_SELFTEST / PORT_ALTSCP /
 * PORT_ALTDUMP) to the 82596.  The command code lives in the low bits
 * and is OR'd with the (16-byte aligned) address argument.
 */
static inline void MPU_PORT(struct net_device *dev, int c, volatile void *x)
{
#ifdef ENABLE_MVME16x_NET
    if (MACH_IS_MVME16x) {
        struct i596_reg *p = (struct i596_reg *) (dev->base_addr);
        u32 w = (c) | (u32) (x);

        /* 32-bit port value is written as two 16-bit halves,
         * low half first (see p5-18 of the 82596 manual).
         */
        p->porthi = w & 0xffff;
        p->portlo = w >> 16;
    }
#endif
#ifdef ENABLE_BVME6000_NET
    if (MACH_IS_BVME6000) {
        u32 v = (u32) (c) | (u32) (x);

        /* Word-swap, then write the value twice with a short delay -
         * the chip latches the 32-bit port command across two accesses.
         */
        v = ((u32) (v) << 16) | ((u32) (v) >> 16);
        *(volatile u32 *) dev->base_addr = v;
        udelay(1);
        *(volatile u32 *) dev->base_addr = v;
    }
#endif
}
415 static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
417 while (--delcnt && lp->iscp.stat)
418 udelay(10);
419 if (!delcnt) {
420 printk("%s: %s, status %4.4x, cmd %4.4x.\n",
421 dev->name, str, lp->scb.status, lp->scb.command);
422 return -1;
424 else
425 return 0;
429 static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
431 while (--delcnt && lp->scb.command)
432 udelay(10);
433 if (!delcnt) {
434 printk("%s: %s, status %4.4x, cmd %4.4x.\n",
435 dev->name, str, lp->scb.status, lp->scb.command);
436 return -1;
438 else
439 return 0;
443 static void i596_display_data(struct net_device *dev)
445 struct i596_private *lp = (struct i596_private *) dev->priv;
446 struct i596_cmd *cmd;
447 struct i596_rfd *rfd;
448 struct i596_rbd *rbd;
450 printk("lp and scp at %p, .sysbus = %08lx, .iscp = %p\n",
451 &lp->scp, lp->scp.sysbus, lp->scp.iscp);
452 printk("iscp at %p, iscp.stat = %08lx, .scb = %p\n",
453 &lp->iscp, lp->iscp.stat, lp->iscp.scb);
454 printk("scb at %p, scb.status = %04x, .command = %04x,"
455 " .cmd = %p, .rfd = %p\n",
456 &lp->scb, lp->scb.status, lp->scb.command,
457 lp->scb.cmd, lp->scb.rfd);
458 printk(" errors: crc %lx, align %lx, resource %lx,"
459 " over %lx, rcvdt %lx, short %lx\n",
460 lp->scb.crc_err, lp->scb.align_err, lp->scb.resource_err,
461 lp->scb.over_err, lp->scb.rcvdt_err, lp->scb.short_err);
462 cmd = lp->cmd_head;
463 while (cmd != I596_NULL) {
464 printk("cmd at %p, .status = %04x, .command = %04x, .b_next = %p\n",
465 cmd, cmd->status, cmd->command, cmd->b_next);
466 cmd = cmd->v_next;
468 rfd = lp->rfd_head;
469 printk("rfd_head = %p\n", rfd);
470 do {
471 printk (" %p .stat %04x, .cmd %04x, b_next %p, rbd %p,"
472 " count %04x\n",
473 rfd, rfd->stat, rfd->cmd, rfd->b_next, rfd->rbd,
474 rfd->count);
475 rfd = rfd->v_next;
476 } while (rfd != lp->rfd_head);
477 rbd = lp->rbd_head;
478 printk("rbd_head = %p\n", rbd);
479 do {
480 printk(" %p .count %04x, b_next %p, b_data %p, size %04x\n",
481 rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size);
482 rbd = rbd->v_next;
483 } while (rbd != lp->rbd_head);
#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
/* Handler for the chip's error interrupt.  Re-arms the interrupt at the
 * board level where needed, then dumps the shared structures for debug.
 *
 * BUGFIX: the PCCchip2 registers at 0xfff42000 exist only on MVME16x
 * boards, but this handler is also called from the BVME6000 interrupt
 * path (see i596_interrupt), so the writes must be guarded by
 * MACH_IS_MVME16x rather than done unconditionally.
 */
static void i596_error(int irq, void *dev_id, struct pt_regs *regs)
{
    struct net_device *dev = dev_id;

#ifdef ENABLE_MVME16x_NET
    if (MACH_IS_MVME16x) {
        volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

        /* Re-enable the i596 error interrupt at the PCCchip2 */
        pcc2[0x28] = 1;
        pcc2[0x2b] = 0x1d;
    }
#endif
    printk("%s: Error interrupt\n", dev->name);
    i596_display_data(dev);
}
#endif
500 static inline void init_rx_bufs(struct net_device *dev)
502 struct i596_private *lp = (struct i596_private *)dev->priv;
503 int i;
504 struct i596_rfd *rfd;
505 struct i596_rbd *rbd;
507 /* First build the Receive Buffer Descriptor List */
509 for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
510 struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
512 if (skb == NULL)
513 panic("82596: alloc_skb() failed");
514 skb->dev = dev;
515 rbd->v_next = rbd+1;
516 rbd->b_next = WSWAPrbd(virt_to_bus(rbd+1));
517 rbd->b_addr = WSWAPrbd(virt_to_bus(rbd));
518 rbd->skb = skb;
519 rbd->v_data = skb->tail;
520 rbd->b_data = WSWAPchar(virt_to_bus(skb->tail));
521 rbd->size = PKT_BUF_SZ;
522 #ifdef __mc68000__
523 cache_clear(virt_to_phys(skb->tail), PKT_BUF_SZ);
524 #endif
526 lp->rbd_head = lp->rbds;
527 rbd = lp->rbds + rx_ring_size - 1;
528 rbd->v_next = lp->rbds;
529 rbd->b_next = WSWAPrbd(virt_to_bus(lp->rbds));
531 /* Now build the Receive Frame Descriptor List */
533 for (i = 0, rfd = lp->rfds; i < rx_ring_size; i++, rfd++) {
534 rfd->rbd = I596_NULL;
535 rfd->v_next = rfd+1;
536 rfd->v_prev = rfd-1;
537 rfd->b_next = WSWAPrfd(virt_to_bus(rfd+1));
538 rfd->cmd = CMD_FLEX;
540 lp->rfd_head = lp->rfds;
541 lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds));
542 rfd = lp->rfds;
543 rfd->rbd = lp->rbd_head;
544 rfd->v_prev = lp->rfds + rx_ring_size - 1;
545 rfd = lp->rfds + rx_ring_size - 1;
546 rfd->v_next = lp->rfds;
547 rfd->b_next = WSWAPrfd(virt_to_bus(lp->rfds));
548 rfd->cmd = CMD_EOL|CMD_FLEX;
551 static inline void remove_rx_bufs(struct net_device *dev)
553 struct i596_private *lp = (struct i596_private *)dev->priv;
554 struct i596_rbd *rbd;
555 int i;
557 for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
558 if (rbd->skb == NULL)
559 break;
560 dev_kfree_skb(rbd->skb);
565 static void rebuild_rx_bufs(struct net_device *dev)
567 struct i596_private *lp = (struct i596_private *) dev->priv;
568 int i;
570 /* Ensure rx frame/buffer descriptors are tidy */
572 for (i = 0; i < rx_ring_size; i++) {
573 lp->rfds[i].rbd = I596_NULL;
574 lp->rfds[i].cmd = CMD_FLEX;
576 lp->rfds[rx_ring_size-1].cmd = CMD_EOL|CMD_FLEX;
577 lp->rfd_head = lp->rfds;
578 lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds));
579 lp->rbd_head = lp->rbds;
580 lp->rfds[0].rbd = WSWAPrbd(virt_to_bus(lp->rbds));
584 static int init_i596_mem(struct net_device *dev)
586 struct i596_private *lp = (struct i596_private *) dev->priv;
587 #if !defined(ENABLE_MVME16x_NET) && !defined(ENABLE_BVME6000_NET)
588 short ioaddr = dev->base_addr;
589 #endif
590 unsigned long flags;
592 MPU_PORT(dev, PORT_RESET, 0);
594 udelay(100); /* Wait 100us - seems to help */
596 #if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
597 #ifdef ENABLE_MVME16x_NET
598 if (MACH_IS_MVME16x) {
599 volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
601 /* Disable all ints for now */
602 pcc2[0x28] = 1;
603 pcc2[0x2a] = 0x48;
604 /* Following disables snooping. Snooping is not required
605 * as we make appropriate use of non-cached pages for
606 * shared data, and cache_push/cache_clear.
608 pcc2[0x2b] = 0x08;
610 #endif
611 #ifdef ENABLE_BVME6000_NET
612 if (MACH_IS_BVME6000) {
613 volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
615 *ethirq = 1;
617 #endif
619 /* change the scp address */
621 MPU_PORT(dev, PORT_ALTSCP, (void *)virt_to_bus(&lp->scp));
623 #elif defined(ENABLE_APRICOT)
626 u32 scp = virt_to_bus(&lp->scp);
628 /* change the scp address */
629 outw(0, ioaddr);
630 outw(0, ioaddr);
631 outb(4, ioaddr + 0xf);
632 outw(scp | 2, ioaddr);
633 outw(scp >> 16, ioaddr);
635 #endif
637 lp->last_cmd = jiffies;
639 #ifdef ENABLE_MVME16x_NET
640 if (MACH_IS_MVME16x)
641 lp->scp.sysbus = 0x00000054;
642 #endif
643 #ifdef ENABLE_BVME6000_NET
644 if (MACH_IS_BVME6000)
645 lp->scp.sysbus = 0x0000004c;
646 #endif
647 #ifdef ENABLE_APRICOT
648 if (MACH_IS_APRICOT)
649 lp->scp.sysbus = 0x00440000;
650 #endif
652 lp->scp.iscp = WSWAPiscp(virt_to_bus(&(lp->iscp)));
653 lp->iscp.scb = WSWAPscb(virt_to_bus(&(lp->scb)));
654 lp->iscp.stat = ISCP_BUSY;
655 lp->cmd_backlog = 0;
657 lp->cmd_head = lp->scb.cmd = I596_NULL;
659 DEB(DEB_INIT,printk("%s: starting i82596.\n", dev->name));
661 #if defined(ENABLE_APRICOT)
662 (void) inb(ioaddr + 0x10);
663 outb(4, ioaddr + 0xf);
664 #endif
665 CA(dev);
667 if (wait_istat(dev,lp,1000,"initialization timed out"))
668 goto failed;
669 DEB(DEB_INIT,printk("%s: i82596 initialization successful\n", dev->name));
671 /* Ensure rx frame/buffer descriptors are tidy */
672 rebuild_rx_bufs(dev);
673 lp->scb.command = 0;
675 #ifdef ENABLE_MVME16x_NET
676 if (MACH_IS_MVME16x) {
677 volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
679 /* Enable ints, etc. now */
680 pcc2[0x2a] = 0x55; /* Edge sensitive */
681 pcc2[0x2b] = 0x15;
683 #endif
684 #ifdef ENABLE_BVME6000_NET
685 if (MACH_IS_BVME6000) {
686 volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
688 *ethirq = 3;
690 #endif
693 DEB(DEB_INIT,printk("%s: queuing CmdConfigure\n", dev->name));
694 memcpy(lp->cf_cmd.i596_config, init_setup, 14);
695 lp->cf_cmd.cmd.command = CmdConfigure;
696 i596_add_cmd(dev, &lp->cf_cmd.cmd);
698 DEB(DEB_INIT,printk("%s: queuing CmdSASetup\n", dev->name));
699 memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
700 lp->sa_cmd.cmd.command = CmdSASetup;
701 i596_add_cmd(dev, &lp->sa_cmd.cmd);
703 DEB(DEB_INIT,printk("%s: queuing CmdTDR\n", dev->name));
704 lp->tdr_cmd.cmd.command = CmdTDR;
705 i596_add_cmd(dev, &lp->tdr_cmd.cmd);
707 spin_lock_irqsave (&lp->lock, flags);
709 if (wait_cmd(dev,lp,1000,"timed out waiting to issue RX_START")) {
710 spin_unlock_irqrestore (&lp->lock, flags);
711 goto failed;
713 DEB(DEB_INIT,printk("%s: Issuing RX_START\n", dev->name));
714 lp->scb.command = RX_START;
715 CA(dev);
717 spin_unlock_irqrestore (&lp->lock, flags);
719 if (wait_cmd(dev,lp,1000,"RX_START not processed"))
720 goto failed;
721 DEB(DEB_INIT,printk("%s: Receive unit started OK\n", dev->name));
722 return 0;
724 failed:
725 printk("%s: Failed to initialise 82596\n", dev->name);
726 MPU_PORT(dev, PORT_RESET, 0);
727 return -1;
730 static inline int i596_rx(struct net_device *dev)
732 struct i596_private *lp = (struct i596_private *)dev->priv;
733 struct i596_rfd *rfd;
734 struct i596_rbd *rbd;
735 int frames = 0;
737 DEB(DEB_RXFRAME,printk ("i596_rx(), rfd_head %p, rbd_head %p\n",
738 lp->rfd_head, lp->rbd_head));
740 rfd = lp->rfd_head; /* Ref next frame to check */
742 while ((rfd->stat) & STAT_C) { /* Loop while complete frames */
743 if (rfd->rbd == I596_NULL)
744 rbd = I596_NULL;
745 else if (rfd->rbd == lp->rbd_head->b_addr)
746 rbd = lp->rbd_head;
747 else {
748 printk("%s: rbd chain broken!\n", dev->name);
749 /* XXX Now what? */
750 rbd = I596_NULL;
752 DEB(DEB_RXFRAME, printk(" rfd %p, rfd.rbd %p, rfd.stat %04x\n",
753 rfd, rfd->rbd, rfd->stat));
755 if (rbd != I596_NULL && ((rfd->stat) & STAT_OK)) {
756 /* a good frame */
757 int pkt_len = rbd->count & 0x3fff;
758 struct sk_buff *skb = rbd->skb;
759 int rx_in_place = 0;
761 DEB(DEB_RXADDR,print_eth(rbd->v_data, "received"));
762 frames++;
764 /* Check if the packet is long enough to just accept
765 * without copying to a properly sized skbuff.
768 if (pkt_len > rx_copybreak) {
769 struct sk_buff *newskb;
771 /* Get fresh skbuff to replace filled one. */
772 newskb = dev_alloc_skb(PKT_BUF_SZ);
773 if (newskb == NULL) {
774 skb = NULL; /* drop pkt */
775 goto memory_squeeze;
777 /* Pass up the skb already on the Rx ring. */
778 skb_put(skb, pkt_len);
779 rx_in_place = 1;
780 rbd->skb = newskb;
781 newskb->dev = dev;
782 rbd->v_data = newskb->tail;
783 rbd->b_data = WSWAPchar(virt_to_bus(newskb->tail));
784 #ifdef __mc68000__
785 cache_clear(virt_to_phys(newskb->tail), PKT_BUF_SZ);
786 #endif
788 else
789 skb = dev_alloc_skb(pkt_len + 2);
790 memory_squeeze:
791 if (skb == NULL) {
792 /* XXX tulip.c can defer packets here!! */
793 printk ("%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
794 lp->stats.rx_dropped++;
796 else {
797 skb->dev = dev;
798 if (!rx_in_place) {
799 /* 16 byte align the data fields */
800 skb_reserve(skb, 2);
801 memcpy(skb_put(skb,pkt_len), rbd->v_data, pkt_len);
803 skb->protocol=eth_type_trans(skb,dev);
804 skb->len = pkt_len;
805 #ifdef __mc68000__
806 cache_clear(virt_to_phys(rbd->skb->tail),
807 pkt_len);
808 #endif
809 netif_rx(skb);
810 lp->stats.rx_packets++;
811 lp->stats.rx_bytes+=pkt_len;
814 else {
815 DEB(DEB_ERRORS, printk("%s: Error, rfd.stat = 0x%04x\n",
816 dev->name, rfd->stat));
817 lp->stats.rx_errors++;
818 if ((rfd->stat) & 0x0001)
819 lp->stats.collisions++;
820 if ((rfd->stat) & 0x0080)
821 lp->stats.rx_length_errors++;
822 if ((rfd->stat) & 0x0100)
823 lp->stats.rx_over_errors++;
824 if ((rfd->stat) & 0x0200)
825 lp->stats.rx_fifo_errors++;
826 if ((rfd->stat) & 0x0400)
827 lp->stats.rx_frame_errors++;
828 if ((rfd->stat) & 0x0800)
829 lp->stats.rx_crc_errors++;
830 if ((rfd->stat) & 0x1000)
831 lp->stats.rx_length_errors++;
834 /* Clear the buffer descriptor count and EOF + F flags */
836 if (rbd != I596_NULL && (rbd->count & 0x4000)) {
837 rbd->count = 0;
838 lp->rbd_head = rbd->v_next;
841 /* Tidy the frame descriptor, marking it as end of list */
843 rfd->rbd = I596_NULL;
844 rfd->stat = 0;
845 rfd->cmd = CMD_EOL|CMD_FLEX;
846 rfd->count = 0;
848 /* Remove end-of-list from old end descriptor */
850 rfd->v_prev->cmd = CMD_FLEX;
852 /* Update record of next frame descriptor to process */
854 lp->scb.rfd = rfd->b_next;
855 lp->rfd_head = rfd->v_next;
856 rfd = lp->rfd_head;
859 DEB(DEB_RXFRAME,printk ("frames %d\n", frames));
861 return 0;
865 static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
867 struct i596_cmd *ptr;
869 while (lp->cmd_head != I596_NULL) {
870 ptr = lp->cmd_head;
871 lp->cmd_head = ptr->v_next;
872 lp->cmd_backlog--;
874 switch ((ptr->command) & 0x7) {
875 case CmdTx:
877 struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
878 struct sk_buff *skb = tx_cmd->skb;
880 dev_kfree_skb(skb);
882 lp->stats.tx_errors++;
883 lp->stats.tx_aborted_errors++;
885 ptr->v_next = ptr->b_next = I596_NULL;
886 tx_cmd->cmd.command = 0; /* Mark as free */
887 break;
889 default:
890 ptr->v_next = ptr->b_next = I596_NULL;
894 wait_cmd(dev,lp,100,"i596_cleanup_cmd timed out");
895 lp->scb.cmd = I596_NULL;
898 static inline void i596_reset(struct net_device *dev, struct i596_private *lp, int ioaddr)
900 unsigned long flags;
902 DEB(DEB_RESET,printk("i596_reset\n"));
904 spin_lock_irqsave (&lp->lock, flags);
906 wait_cmd(dev,lp,100,"i596_reset timed out");
908 netif_stop_queue(dev);
910 lp->scb.command = CUC_ABORT | RX_ABORT;
911 CA(dev);
913 /* wait for shutdown */
914 wait_cmd(dev,lp,1000,"i596_reset 2 timed out");
915 spin_unlock_irqrestore (&lp->lock, flags);
917 i596_cleanup_cmd(dev,lp);
918 i596_rx(dev);
920 netif_start_queue(dev);
921 init_i596_mem(dev);
924 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
926 struct i596_private *lp = (struct i596_private *) dev->priv;
927 int ioaddr = dev->base_addr;
928 unsigned long flags;
930 DEB(DEB_ADDCMD,printk("i596_add_cmd\n"));
932 cmd->status = 0;
933 cmd->command |= (CMD_EOL | CMD_INTR);
934 cmd->v_next = cmd->b_next = I596_NULL;
936 spin_lock_irqsave (&lp->lock, flags);
938 if (lp->cmd_head != I596_NULL) {
939 lp->cmd_tail->v_next = cmd;
940 lp->cmd_tail->b_next = WSWAPcmd(virt_to_bus(&cmd->status));
941 } else {
942 lp->cmd_head = cmd;
943 wait_cmd(dev,lp,100,"i596_add_cmd timed out");
944 lp->scb.cmd = WSWAPcmd(virt_to_bus(&cmd->status));
945 lp->scb.command = CUC_START;
946 CA(dev);
948 lp->cmd_tail = cmd;
949 lp->cmd_backlog++;
951 spin_unlock_irqrestore (&lp->lock, flags);
953 if (lp->cmd_backlog > max_cmd_backlog) {
954 unsigned long tickssofar = jiffies - lp->last_cmd;
956 if (tickssofar < ticks_limit)
957 return;
959 printk("%s: command unit timed out, status resetting.\n", dev->name);
961 i596_reset(dev, lp, ioaddr);
965 static int i596_open(struct net_device *dev)
967 int res = 0;
969 DEB(DEB_OPEN,printk("%s: i596_open() irq %d.\n", dev->name, dev->irq));
971 if (request_irq(dev->irq, &i596_interrupt, 0, "i82596", dev)) {
972 printk("%s: IRQ %d not free\n", dev->name, dev->irq);
973 return -EAGAIN;
975 #ifdef ENABLE_MVME16x_NET
976 if (MACH_IS_MVME16x) {
977 if (request_irq(0x56, &i596_error, 0, "i82596_error", dev))
978 return -EAGAIN;
980 #endif
981 init_rx_bufs(dev);
983 netif_start_queue(dev);
985 MOD_INC_USE_COUNT;
987 /* Initialize the 82596 memory */
988 if (init_i596_mem(dev)) {
989 res = -EAGAIN;
990 free_irq(dev->irq, dev);
993 return res;
996 static void i596_tx_timeout (struct net_device *dev)
998 struct i596_private *lp = (struct i596_private *) dev->priv;
999 int ioaddr = dev->base_addr;
1001 /* Transmitter timeout, serious problems. */
1002 DEB(DEB_ERRORS,printk("%s: transmit timed out, status resetting.\n",
1003 dev->name));
1005 lp->stats.tx_errors++;
1007 /* Try to restart the adaptor */
1008 if (lp->last_restart == lp->stats.tx_packets) {
1009 DEB(DEB_ERRORS,printk ("Resetting board.\n"));
1010 /* Shutdown and restart */
1011 i596_reset (dev, lp, ioaddr);
1012 } else {
1013 /* Issue a channel attention signal */
1014 DEB(DEB_ERRORS,printk ("Kicking board.\n"));
1015 lp->scb.command = CUC_START | RX_START;
1016 CA (dev);
1017 lp->last_restart = lp->stats.tx_packets;
1020 dev->trans_start = jiffies;
1021 netif_wake_queue (dev);
1025 static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
1027 struct i596_private *lp = (struct i596_private *) dev->priv;
1028 struct tx_cmd *tx_cmd;
1029 struct i596_tbd *tbd;
1030 short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
1031 dev->trans_start = jiffies;
1033 DEB(DEB_STARTTX,printk("%s: i596_start_xmit(%x,%x) called\n", dev->name,
1034 skb->len, (unsigned int)skb->data));
1036 netif_stop_queue(dev);
1038 tx_cmd = lp->tx_cmds + lp->next_tx_cmd;
1039 tbd = lp->tbds + lp->next_tx_cmd;
1041 if (tx_cmd->cmd.command) {
1042 DEB(DEB_ERRORS,printk ("%s: xmit ring full, dropping packet.\n",
1043 dev->name));
1044 lp->stats.tx_dropped++;
1046 dev_kfree_skb(skb);
1047 } else {
1048 if (++lp->next_tx_cmd == TX_RING_SIZE)
1049 lp->next_tx_cmd = 0;
1050 tx_cmd->tbd = WSWAPtbd(virt_to_bus(tbd));
1051 tbd->next = I596_NULL;
1053 tx_cmd->cmd.command = CMD_FLEX | CmdTx;
1054 tx_cmd->skb = skb;
1056 tx_cmd->pad = 0;
1057 tx_cmd->size = 0;
1058 tbd->pad = 0;
1059 tbd->size = EOF | length;
1061 tbd->data = WSWAPchar(virt_to_bus(skb->data));
1063 #ifdef __mc68000__
1064 cache_push(virt_to_phys(skb->data), length);
1065 #endif
1066 DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued"));
1067 i596_add_cmd(dev, &tx_cmd->cmd);
1069 lp->stats.tx_packets++;
1070 lp->stats.tx_bytes += length;
1073 netif_start_queue(dev);
1075 return 0;
/* Debug helper: dump an Ethernet header as "src --> dst ethertype, tag".
 * 'add' points at the raw frame (dst at 0, src at 6, type at 12).
 */
static void print_eth(unsigned char *add, char *str)
{
    int idx;

    printk("i596 0x%p, ", add);
    /* source MAC (bytes 6..11) */
    for (idx = 0; idx < 6; idx++)
        printk(" %02X", add[idx + 6]);
    printk(" -->");
    /* destination MAC (bytes 0..5) */
    for (idx = 0; idx < 6; idx++)
        printk(" %02X", add[idx]);
    /* ethertype and caller's tag */
    printk(" %02X%02X, %s\n", add[12], add[13], str);
}
1091 int __init i82596_probe(struct net_device *dev)
1093 int i;
1094 struct i596_private *lp;
1095 char eth_addr[8];
1096 static int probed = 0;
1098 if (probed)
1099 return -ENODEV;
1100 probed++;
1101 #ifdef ENABLE_MVME16x_NET
1102 if (MACH_IS_MVME16x) {
1103 if (mvme16x_config & MVME16x_CONFIG_NO_ETHERNET) {
1104 printk("Ethernet probe disabled - chip not present\n");
1105 return -ENODEV;
1107 memcpy(eth_addr, (void *) 0xfffc1f2c, 6); /* YUCK! Get addr from NOVRAM */
1108 dev->base_addr = MVME_I596_BASE;
1109 dev->irq = (unsigned) MVME16x_IRQ_I596;
1111 #endif
1112 #ifdef ENABLE_BVME6000_NET
1113 if (MACH_IS_BVME6000) {
1114 volatile unsigned char *rtc = (unsigned char *) BVME_RTC_BASE;
1115 unsigned char msr = rtc[3];
1116 int i;
1118 rtc[3] |= 0x80;
1119 for (i = 0; i < 6; i++)
1120 eth_addr[i] = rtc[i * 4 + 7]; /* Stored in RTC RAM at offset 1 */
1121 rtc[3] = msr;
1122 dev->base_addr = BVME_I596_BASE;
1123 dev->irq = (unsigned) BVME_IRQ_I596;
1125 #endif
1126 #ifdef ENABLE_APRICOT
1128 int checksum = 0;
1129 int ioaddr = 0x300;
1131 /* this is easy the ethernet interface can only be at 0x300 */
1132 /* first check nothing is already registered here */
1134 if (check_region(ioaddr, I596_TOTAL_SIZE)) {
1135 printk("82596: IO address 0x%04x in use\n", ioaddr);
1136 return -ENODEV;
1139 for (i = 0; i < 8; i++) {
1140 eth_addr[i] = inb(ioaddr + 8 + i);
1141 checksum += eth_addr[i];
1144 /* checksum is a multiple of 0x100, got this wrong first time
1145 some machines have 0x100, some 0x200. The DOS driver doesn't
1146 even bother with the checksum */
1148 if (checksum % 0x100)
1149 return -ENODEV;
1151 /* Some other boards trip the checksum.. but then appear as
1152 * ether address 0. Trap these - AC */
1154 if (memcmp(eth_addr, "\x00\x00\x49", 3) != 0)
1155 return -ENODEV;
1157 if (!request_region(ioaddr, I596_TOTAL_SIZE, "i596"))
1158 return -ENODEV;
1160 dev->base_addr = ioaddr;
1161 dev->irq = 10;
1163 #endif
1164 dev->mem_start = (int)__get_free_pages(GFP_ATOMIC, 0);
1165 if (!dev->mem_start) {
1166 #ifdef ENABLE_APRICOT
1167 release_region(dev->base_addr, I596_TOTAL_SIZE);
1168 #endif
1169 return -ENOMEM;
1172 ether_setup(dev);
1173 DEB(DEB_PROBE,printk("%s: 82596 at %#3lx,", dev->name, dev->base_addr));
1175 for (i = 0; i < 6; i++)
1176 DEB(DEB_PROBE,printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]));
1178 DEB(DEB_PROBE,printk(" IRQ %d.\n", dev->irq));
1180 DEB(DEB_PROBE,printk(version));
1182 /* The 82596-specific entries in the device structure. */
1183 dev->open = i596_open;
1184 dev->stop = i596_close;
1185 dev->hard_start_xmit = i596_start_xmit;
1186 dev->get_stats = i596_get_stats;
1187 dev->set_multicast_list = set_multicast_list;
1188 dev->tx_timeout = i596_tx_timeout;
1189 dev->watchdog_timeo = TX_TIMEOUT;
1191 dev->priv = (void *)(dev->mem_start);
1193 lp = (struct i596_private *) dev->priv;
1194 DEB(DEB_INIT,printk ("%s: lp at 0x%08lx (%d bytes), lp->scb at 0x%08lx\n",
1195 dev->name, (unsigned long)lp,
1196 sizeof(struct i596_private), (unsigned long)&lp->scb));
1197 memset((void *) lp, 0, sizeof(struct i596_private));
1199 #ifdef __mc68000__
1200 cache_push(virt_to_phys((void *)(dev->mem_start)), 4096);
1201 cache_clear(virt_to_phys((void *)(dev->mem_start)), 4096);
1202 kernel_set_cachemode((void *)(dev->mem_start), 4096, IOMAP_NOCACHE_SER);
1203 #endif
1204 lp->scb.command = 0;
1205 lp->scb.cmd = I596_NULL;
1206 lp->scb.rfd = I596_NULL;
1207 lp->lock = SPIN_LOCK_UNLOCKED;
1209 return 0;
/*
 * i596_interrupt - interrupt service routine for the i82596.
 *
 * Reads the chip's status from the shared System Control Block (scb),
 * reaps completed commands off the software command queue, hands
 * received frames to i596_rx(), then acknowledges the interrupt both
 * in the scb and in the board-specific interrupt hardware, finishing
 * with a CA() (channel attention) so the chip notices the ack.
 * Runs with lp->lock held for the whole body.
 */
1212 static void i596_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1214 struct net_device *dev = dev_id;
1215 struct i596_private *lp;
1216 short ioaddr;
1217 unsigned short status, ack_cmd = 0;
/* On BVME6000 an Ethernet error is flagged in a separate local IRQ
 * status register; hand those straight to i596_error() and bail. */
1219 #ifdef ENABLE_BVME6000_NET
1220 if (MACH_IS_BVME6000) {
1221 if (*(char *) BVME_LOCAL_IRQ_STAT & BVME_ETHERR) {
1222 i596_error(BVME_IRQ_I596, NULL, NULL);
1223 return;
1226 #endif
/* Safety net for a stray/shared interrupt with no device attached. */
1227 if (dev == NULL) {
1228 printk("i596_interrupt(): irq %d for unknown device.\n", irq);
1229 return;
1232 ioaddr = dev->base_addr;
1233 lp = (struct i596_private *) dev->priv;
1235 spin_lock (&lp->lock);
/* Let the chip accept any previously issued scb command before we
 * read the status word. */
1237 wait_cmd(dev,lp,100,"i596 interrupt, timeout");
1238 status = lp->scb.status;
1240 DEB(DEB_INTS,printk("%s: i596 interrupt, IRQ %d, status %4.4x.\n",
1241 dev->name, irq, status));
/* The top four status bits are the interrupt causes; writing them
 * back to scb.command at the end acknowledges them. */
1243 ack_cmd = status & 0xf000;
/* 0x8000: a command completed; 0x2000: the command unit went
 * inactive (see the debug messages below). */
1245 if ((status & 0x8000) || (status & 0x2000)) {
1246 struct i596_cmd *ptr;
1248 if ((status & 0x8000))
1249 DEB(DEB_INTS,printk("%s: i596 interrupt completed command.\n", dev->name));
1250 if ((status & 0x2000))
1251 DEB(DEB_INTS,printk("%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));
/* Reap every command at the head of the queue that the chip has
 * marked complete (STAT_C). */
1253 while ((lp->cmd_head != I596_NULL) && (lp->cmd_head->status & STAT_C)) {
1254 ptr = lp->cmd_head;
1256 DEB(DEB_STATUS,printk("cmd_head->status = %04x, ->command = %04x\n",
1257 lp->cmd_head->status, lp->cmd_head->command));
1258 lp->cmd_head = ptr->v_next;
1259 lp->cmd_backlog--;
1261 switch ((ptr->command) & 0x7) {
1262 case CmdTx:
/* Transmit finished: map the i596 status bits onto the standard
 * error counters, free the skb, and mark the tx_cmd slot free. */
1264 struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
1265 struct sk_buff *skb = tx_cmd->skb;
1267 if ((ptr->status) & STAT_OK) {
1268 DEB(DEB_TXADDR,print_eth(skb->data, "tx-done"));
1269 } else {
1270 lp->stats.tx_errors++;
1271 if ((ptr->status) & 0x0020)
1272 lp->stats.collisions++;
/* Note inverted sense: heartbeat error counted when 0x0040 is clear. */
1273 if (!((ptr->status) & 0x0040))
1274 lp->stats.tx_heartbeat_errors++;
1275 if ((ptr->status) & 0x0400)
1276 lp->stats.tx_carrier_errors++;
1277 if ((ptr->status) & 0x0800)
1278 lp->stats.collisions++;
1279 if ((ptr->status) & 0x1000)
1280 lp->stats.tx_aborted_errors++;
1283 dev_kfree_skb_irq(skb);
1285 tx_cmd->cmd.command = 0; /* Mark free */
1286 break;
/* TDR = Time Domain Reflectometry cable test result. */
1288 case CmdTDR:
1290 unsigned short status = ((struct tdr_cmd *)ptr)->status;
1292 if (status & 0x8000) {
1293 DEB(DEB_ANY,printk("%s: link ok.\n", dev->name));
1294 } else {
1295 if (status & 0x4000)
1296 printk("%s: Transceiver problem.\n", dev->name);
1297 if (status & 0x2000)
1298 printk("%s: Termination problem.\n", dev->name);
1299 if (status & 0x1000)
1300 printk("%s: Short circuit.\n", dev->name);
1302 DEB(DEB_TDR,printk("%s: Time %d.\n", dev->name, status & 0x07ff));
1304 break;
1306 case CmdConfigure:
1307 /* Zap command so set_multicast_list() knows it is free */
1308 ptr->command = 0;
1309 break;
/* Unlink the reaped command and timestamp the queue activity. */
1311 ptr->v_next = ptr->b_next = I596_NULL;
1312 lp->last_cmd = jiffies;
/* Strip the high control bits (top three, masked by 0x1fff) from
 * every command still queued ahead of the tail. */
1315 ptr = lp->cmd_head;
1316 while ((ptr != I596_NULL) && (ptr != lp->cmd_tail)) {
1317 ptr->command &= 0x1fff;
1318 ptr = ptr->v_next;
/* If work remains, point the scb at the new head and restart the
 * command unit along with the ack. */
1321 if ((lp->cmd_head != I596_NULL))
1322 ack_cmd |= CUC_START;
1323 lp->scb.cmd = WSWAPcmd(virt_to_bus(&lp->cmd_head->status));
/* 0x4000: frame received; 0x1000: receive unit became not ready. */
1325 if ((status & 0x1000) || (status & 0x4000)) {
1326 if ((status & 0x4000))
1327 DEB(DEB_INTS,printk("%s: i596 interrupt received a frame.\n", dev->name));
1328 i596_rx(dev);
1329 /* Only RX_START if stopped - RGH 07-07-96 */
1330 if (status & 0x1000) {
1331 if (netif_running(dev)) {
1332 DEB(DEB_ERRORS,printk("%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status));
1333 ack_cmd |= RX_START;
1334 lp->stats.rx_errors++;
1335 lp->stats.rx_fifo_errors++;
1336 rebuild_rx_bufs(dev);
/* Write the accumulated ack (and any CUC_START/RX_START) to the scb
 * once the chip is ready to take a new command. */
1340 wait_cmd(dev,lp,100,"i596 interrupt, timeout");
1341 lp->scb.command = ack_cmd;
1343 #ifdef ENABLE_MVME16x_NET
1344 if (MACH_IS_MVME16x) {
1345 /* Ack the interrupt */
/* NOTE(review): 0xfff42000 looks like the MVME16x on-board interrupt
 * controller (PCC2) — confirm against board docs. */
1347 volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
1349 pcc2[0x2a] |= 0x08;
1351 #endif
1352 #ifdef ENABLE_BVME6000_NET
1353 if (MACH_IS_BVME6000) {
1354 volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
1356 *ethirq = 1;
1357 *ethirq = 3;
1359 #endif
1360 #ifdef ENABLE_APRICOT
/* Apricot: read/ack, then re-enable the board interrupt. */
1361 (void) inb(ioaddr + 0x10);
1362 outb(4, ioaddr + 0xf);
1363 #endif
/* Channel attention: tell the chip to look at the updated scb. */
1364 CA(dev);
1366 DEB(DEB_INTS,printk("%s: exiting interrupt.\n", dev->name));
1368 spin_unlock (&lp->lock);
1369 return;
/*
 * i596_close - bring the interface down.
 *
 * Stops the transmit queue, aborts the chip's command and receive
 * units via the scb, flushes any still-queued commands, disables the
 * board-level interrupt, then releases the IRQ and the receive
 * buffers.  Always returns 0.
 */
1372 static int i596_close(struct net_device *dev)
1374 struct i596_private *lp = (struct i596_private *) dev->priv;
1375 unsigned long flags;
1377 netif_stop_queue(dev);
1379 DEB(DEB_INIT,printk("%s: Shutting down ethercard, status was %4.4x.\n",
1380 dev->name, lp->scb.status));
/* Disable interrupts around the abort handshake so the ISR cannot
 * touch the scb while we use it. */
1382 save_flags(flags);
1383 cli();
1385 wait_cmd(dev,lp,100,"close1 timed out");
1386 lp->scb.command = CUC_ABORT | RX_ABORT;
1387 CA(dev);
/* Wait for the chip to accept the abort before re-enabling ints. */
1389 wait_cmd(dev,lp,100,"close2 timed out");
1390 restore_flags(flags);
1391 DEB(DEB_STRUCT,i596_display_data(dev));
/* Free whatever commands were still sitting on the queue. */
1392 i596_cleanup_cmd(dev,lp);
/* Board-specific interrupt disable follows. */
1394 #ifdef ENABLE_MVME16x_NET
1395 if (MACH_IS_MVME16x) {
1396 volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
1398 /* Disable all ints */
1399 pcc2[0x28] = 1;
1400 pcc2[0x2a] = 0x40;
1401 pcc2[0x2b] = 0x40; /* Set snooping bits now! */
1403 #endif
1404 #ifdef ENABLE_BVME6000_NET
1405 if (MACH_IS_BVME6000) {
1406 volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
1408 *ethirq = 1;
1410 #endif
1412 free_irq(dev->irq, dev);
/* Give the receive sk_buffs (allocated at open time) back. */
1413 remove_rx_bufs(dev);
1414 MOD_DEC_USE_COUNT;
1416 return 0;
1419 static struct net_device_stats *
1420 i596_get_stats(struct net_device *dev)
1422 struct i596_private *lp = (struct i596_private *) dev->priv;
1424 return &lp->stats;
1428 * Set or clear the multicast filter for this adaptor.
1431 static void set_multicast_list(struct net_device *dev)
1433 struct i596_private *lp = (struct i596_private *) dev->priv;
1434 int config = 0, cnt;
1436 DEB(DEB_MULTI,printk("%s: set multicast list, %d entries, promisc %s, allmulti %s\n", dev->name, dev->mc_count, dev->flags & IFF_PROMISC ? "ON" : "OFF", dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
1438 if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) {
1439 lp->cf_cmd.i596_config[8] |= 0x01;
1440 config = 1;
1442 if (!(dev->flags & IFF_PROMISC) && (lp->cf_cmd.i596_config[8] & 0x01)) {
1443 lp->cf_cmd.i596_config[8] &= ~0x01;
1444 config = 1;
1446 if ((dev->flags & IFF_ALLMULTI) && (lp->cf_cmd.i596_config[11] & 0x20)) {
1447 lp->cf_cmd.i596_config[11] &= ~0x20;
1448 config = 1;
1450 if (!(dev->flags & IFF_ALLMULTI) && !(lp->cf_cmd.i596_config[11] & 0x20)) {
1451 lp->cf_cmd.i596_config[11] |= 0x20;
1452 config = 1;
1454 if (config) {
1455 if (lp->cf_cmd.cmd.command)
1456 printk("%s: config change request already queued\n",
1457 dev->name);
1458 else {
1459 lp->cf_cmd.cmd.command = CmdConfigure;
1460 i596_add_cmd(dev, &lp->cf_cmd.cmd);
1464 cnt = dev->mc_count;
1465 if (cnt > MAX_MC_CNT)
1467 cnt = MAX_MC_CNT;
1468 printk("%s: Only %d multicast addresses supported",
1469 dev->name, cnt);
1472 if (dev->mc_count > 0) {
1473 struct dev_mc_list *dmi;
1474 unsigned char *cp;
1475 struct mc_cmd *cmd;
1477 cmd = &lp->mc_cmd;
1478 cmd->cmd.command = CmdMulticastList;
1479 cmd->mc_cnt = dev->mc_count * 6;
1480 cp = cmd->mc_addrs;
1481 for (dmi = dev->mc_list; cnt && dmi != NULL; dmi = dmi->next, cnt--, cp += 6) {
1482 memcpy(cp, dmi->dmi_addr, 6);
1483 if (i596_debug > 1)
1484 DEB(DEB_MULTI,printk("%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n",
1485 dev->name, cp[0],cp[1],cp[2],cp[3],cp[4],cp[5]));
1487 i596_add_cmd(dev, &cmd->cmd);
/*
 * Module support: one statically-allocated device, probed via
 * i82596_probe() when the module is inserted.
 */
1491 #ifdef MODULE
1492 static struct net_device dev_82596 = { init: i82596_probe };
/* Apricot ISA settings.  The board can only sit at I/O base 0x300
 * (see the probe code), so only "irq" is exposed as a parameter. */
1494 #ifdef ENABLE_APRICOT
1495 static int io = 0x300;
1496 static int irq = 10;
1497 MODULE_PARM(irq, "i");
1498 #endif
/* "debug" overrides the driver's debug mask at insmod time; the
 * default of -1 keeps the compiled-in value (see init_module). */
1500 MODULE_PARM(debug, "i");
1501 static int debug = -1;
1503 int init_module(void)
1505 #ifdef ENABLE_APRICOT
1506 dev_82596.base_addr = io;
1507 dev_82596.irq = irq;
1508 #endif
1509 if (debug >= 0)
1510 i596_debug = debug;
1511 if (register_netdev(&dev_82596) != 0)
1512 return -EIO;
1513 return 0;
/*
 * cleanup_module - module removal.
 *
 * Unregisters the device, restores the cache mode of the shared
 * memory page on m68k (it was switched to non-cached at probe time),
 * frees that page, and releases the Apricot I/O region so the module
 * can be inserted again.
 */
1516 void cleanup_module(void)
1518 unregister_netdev(&dev_82596);
1519 #ifdef __mc68000__
1520 /* XXX This assumes default cache mode to be IOMAP_FULL_CACHING,
1521 * XXX which may be invalid (CONFIG_060_WRITETHROUGH)
1524 kernel_set_cachemode((void *)(dev_82596.mem_start), 4096,
1525 IOMAP_FULL_CACHING);
1526 #endif
/* The shared scb/command page allocated by the probe routine. */
1527 free_page ((u32)(dev_82596.mem_start));
1528 dev_82596.priv = NULL;
1529 #ifdef ENABLE_APRICOT
1530 /* If we don't do this, we can't re-insmod it later. */
1531 release_region(dev_82596.base_addr, I596_TOTAL_SIZE);
1532 #endif
1535 #endif /* MODULE */
1538 * Local variables:
1539 * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c 82596.c"
1540 * End: