[PATCH] DVB core update
[linux-2.6/history.git] / drivers / net / lasi_82596.c
blobc215fe3c1ecb1be314d306425057199915d653f8
1 /* lasi_82596.c -- driver for the intel 82596 ethernet controller, as
2      munged into HPPA boxen.
4 This driver is based upon 82596.c, original credits are below...
5 but there were too many hoops which HP wants jumped through to
6 keep this code in there in a sane manner.
8 3 primary sources of the mess --
9 1) hppa needs *lots* of cacheline flushing to keep this kind of
10 MMIO running.
12 2) The 82596 needs to see all of its pointers as their physical
13 address. Thus virt_to_bus/bus_to_virt are *everywhere*.
15 3) The implementation HP is using seems to be significantly pickier
16 about when and how the command and RX units are started. some
17 command ordering was changed.
19 Examination of the mach driver leads one to believe that there
20 might be a saner way to pull this off... anyone who feels like a
21 full rewrite can be my guest.
23 Split 02/13/2000 Sam Creasey (sammy@oh.verio.com)
25 02/01/2000 Initial modifications for parisc by Helge Deller (deller@gmx.de)
26 03/02/2000 changes for better/correct(?) cache-flushing (deller)
29 /* 82596.c: A generic 82596 ethernet driver for linux. */
31 Based on Apricot.c
32 Written 1994 by Mark Evans.
33 This driver is for the Apricot 82596 bus-master interface
35 Modularised 12/94 Mark Evans
38 Modified to support the 82596 ethernet chips on 680x0 VME boards.
39 by Richard Hirst <richard@sleepie.demon.co.uk>
40 Renamed to be 82596.c
42 980825: Changed to receive directly in to sk_buffs which are
43 allocated at open() time. Eliminates copy on incoming frames
44 (small ones are still copied). Shared data now held in a
45 non-cached page, so we can run on 68060 in copyback mode.
47 TBD:
48 * look at deferring rx frames rather than discarding (as per tulip)
49 * handle tx ring full as per tulip
50       * performance test to tune rx_copybreak
52 Most of my modifications relate to the braindead big-endian
53 implementation by Intel. When the i596 is operating in
54 'big-endian' mode, it thinks a 32 bit value of 0x12345678
55 should be stored as 0x56781234. This is a real pain, when
56 you have linked lists which are shared by the 680x0 and the
57 i596.
59 Driver skeleton
60 Written 1993 by Donald Becker.
61 Copyright 1993 United States Government as represented by the Director,
62 National Security Agency. This software may only be used and distributed
63 according to the terms of the GNU General Public License as modified by SRC,
64 incorporated herein by reference.
66 The author may be reached as becker@scyld.com, or C/O
67 Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
71 #include <linux/module.h>
72 #include <linux/kernel.h>
73 #include <linux/string.h>
74 #include <linux/ptrace.h>
75 #include <linux/errno.h>
76 #include <linux/ioport.h>
77 #include <linux/slab.h>
78 #include <linux/interrupt.h>
79 #include <linux/delay.h>
80 #include <linux/netdevice.h>
81 #include <linux/etherdevice.h>
82 #include <linux/skbuff.h>
83 #include <linux/init.h>
84 #include <linux/pci.h>
85 #include <linux/types.h>
87 #include <asm/bitops.h>
88 #include <asm/io.h>
89 #include <asm/pgtable.h>
90 #include <asm/pgalloc.h>
91 #include <asm/irq.h>
92 #include <asm/pdc.h>
93 #include <asm/cache.h>
94 #include <asm/parisc-device.h>
96 static char version[] __devinitdata =
97 "82596.c $Revision: 1.29 $\n";
99 /* DEBUG flags
102 #define DEB_INIT 0x0001
103 #define DEB_PROBE 0x0002
104 #define DEB_SERIOUS 0x0004
105 #define DEB_ERRORS 0x0008
106 #define DEB_MULTI 0x0010
107 #define DEB_TDR 0x0020
108 #define DEB_OPEN 0x0040
109 #define DEB_RESET 0x0080
110 #define DEB_ADDCMD 0x0100
111 #define DEB_STATUS 0x0200
112 #define DEB_STARTTX 0x0400
113 #define DEB_RXADDR 0x0800
114 #define DEB_TXADDR 0x1000
115 #define DEB_RXFRAME 0x2000
116 #define DEB_INTS 0x4000
117 #define DEB_STRUCT 0x8000
118 #define DEB_ANY 0xffff
/* Emit debug action y only when mask bit x is set in i596_debug.
 * Wrapped in do { } while (0) so the macro expands safely inside
 * if/else chains (a bare "if { }" could steal a following else). */
#define DEB(x, y)	do { if (i596_debug & (x)) { y; } } while (0)
124 #define CHECK_WBACK(addr,len) \
125 do { dma_cache_sync((void *)addr, len, DMA_TO_DEVICE); } while (0)
127 #define CHECK_INV(addr,len) \
128 do { dma_cache_sync((void *)addr,len, DMA_FROM_DEVICE); } while(0)
130 #define CHECK_WBACK_INV(addr,len) \
131 do { dma_cache_sync((void *)addr,len, DMA_BIDIRECTIONAL); } while (0)
134 #define PA_I82596_RESET 0 /* Offsets relative to LASI-LAN-Addr.*/
135 #define PA_CPU_PORT_L_ACCESS 4
136 #define PA_CHANNEL_ATTENTION 8
140 * Define various macros for Channel Attention, word swapping etc., dependent
141 * on architecture. MVME and BVME are 680x0 based, otherwise it is Intel.
144 #ifdef __BIG_ENDIAN
145 #define WSWAPrfd(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
146 #define WSWAPrbd(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
147 #define WSWAPiscp(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
148 #define WSWAPscb(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
149 #define WSWAPcmd(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
150 #define WSWAPtbd(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
151 #define WSWAPchar(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
152 #define ISCP_BUSY 0x00010000
153 #define MACH_IS_APRICOT 0
154 #else
155 #define WSWAPrfd(x) ((struct i596_rfd *)(x))
156 #define WSWAPrbd(x) ((struct i596_rbd *)(x))
157 #define WSWAPiscp(x) ((struct i596_iscp *)(x))
158 #define WSWAPscb(x) ((struct i596_scb *)(x))
159 #define WSWAPcmd(x) ((struct i596_cmd *)(x))
160 #define WSWAPtbd(x) ((struct i596_tbd *)(x))
161 #define WSWAPchar(x) ((char *)(x))
162 #define ISCP_BUSY 0x0001
163 #define MACH_IS_APRICOT 1
164 #endif
167 * The MPU_PORT command allows direct access to the 82596. With PORT access
168 * the following commands are available (p5-18). The 32-bit port command
169 * must be word-swapped with the most significant word written first.
170 * This only applies to VME boards.
172 #define PORT_RESET 0x00 /* reset 82596 */
173 #define PORT_SELFTEST 0x01 /* selftest */
174 #define PORT_ALTSCP 0x02 /* alternate SCB address */
175 #define PORT_ALTDUMP 0x03 /* Alternate DUMP address */
177 static int i596_debug = (DEB_SERIOUS|DEB_PROBE);
179 MODULE_AUTHOR("Richard Hirst");
180 MODULE_DESCRIPTION("i82596 driver");
181 MODULE_LICENSE("GPL");
182 MODULE_PARM(i596_debug, "i");
183 MODULE_PARM_DESC(i596_debug, "lasi_82596 debug mask");
185 /* Copy frames shorter than rx_copybreak, otherwise pass on up in
186 * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
188 static int rx_copybreak = 100;
190 #define MAX_DRIVERS 4 /* max count of drivers */
192 #define PKT_BUF_SZ 1536
193 #define MAX_MC_CNT 64
195 #define I596_NULL ((u32)0xffffffff)
197 #define CMD_EOL 0x8000 /* The last command of the list, stop. */
198 #define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
199 #define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
201 #define CMD_FLEX 0x0008 /* Enable flexible memory model */
/* 82596 action-command opcodes (low 3 bits of the command word). */
enum commands {
	CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
	CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
};
208 #define STAT_C 0x8000 /* Set to 0 after execution */
209 #define STAT_B 0x4000 /* Command being executed */
210 #define STAT_OK 0x2000 /* Command executed ok */
211 #define STAT_A 0x1000 /* Command aborted */
213 #define CUC_START 0x0100
214 #define CUC_RESUME 0x0200
215 #define CUC_SUSPEND 0x0300
216 #define CUC_ABORT 0x0400
217 #define RX_START 0x0010
218 #define RX_RESUME 0x0020
219 #define RX_SUSPEND 0x0030
220 #define RX_ABORT 0x0040
222 #define TX_TIMEOUT 5
224 #define OPT_SWAP_PORT 0x0001 /* Need to wordswp on the MPU port */
227 struct i596_reg {
228 unsigned short porthi;
229 unsigned short portlo;
230 u32 ca;
233 #define EOF 0x8000
234 #define SIZE_MASK 0x3fff
236 struct i596_tbd {
237 unsigned short size;
238 unsigned short pad;
239 dma_addr_t next;
240 dma_addr_t data;
241 u32 cache_pad[5]; /* Total 32 bytes... */
244 /* The command structure has two 'next' pointers; v_next is the address of
245 * the next command as seen by the CPU, b_next is the address of the next
246 * command as seen by the 82596. The b_next pointer, as used by the 82596
247 * always references the status field of the next command, rather than the
248 * v_next field, because the 82596 is unaware of v_next. It may seem more
249 * logical to put v_next at the end of the structure, but we cannot do that
250 * because the 82596 expects other fields to be there, depending on command
251 * type.
254 struct i596_cmd {
255 struct i596_cmd *v_next; /* Address from CPUs viewpoint */
256 unsigned short status;
257 unsigned short command;
258 dma_addr_t b_next; /* Address from i596 viewpoint */
261 struct tx_cmd {
262 struct i596_cmd cmd;
263 dma_addr_t tbd;
264 unsigned short size;
265 unsigned short pad;
266 struct sk_buff *skb; /* So we can free it after tx */
267 dma_addr_t dma_addr;
268 #ifdef __LP64__
269 u32 cache_pad[6]; /* Total 64 bytes... */
270 #else
271 u32 cache_pad[1]; /* Total 32 bytes... */
272 #endif
275 struct tdr_cmd {
276 struct i596_cmd cmd;
277 unsigned short status;
278 unsigned short pad;
281 struct mc_cmd {
282 struct i596_cmd cmd;
283 short mc_cnt;
284 char mc_addrs[MAX_MC_CNT*6];
287 struct sa_cmd {
288 struct i596_cmd cmd;
289 char eth_addr[8];
292 struct cf_cmd {
293 struct i596_cmd cmd;
294 char i596_config[16];
297 struct i596_rfd {
298 unsigned short stat;
299 unsigned short cmd;
300 dma_addr_t b_next; /* Address from i596 viewpoint */
301 dma_addr_t rbd;
302 unsigned short count;
303 unsigned short size;
304 struct i596_rfd *v_next; /* Address from CPUs viewpoint */
305 struct i596_rfd *v_prev;
306 #ifndef __LP64__
307 u32 cache_pad[2]; /* Total 32 bytes... */
308 #endif
311 struct i596_rbd {
312 /* hardware data */
313 unsigned short count;
314 unsigned short zero1;
315 dma_addr_t b_next;
316 dma_addr_t b_data; /* Address from i596 viewpoint */
317 unsigned short size;
318 unsigned short zero2;
319 /* driver data */
320 struct sk_buff *skb;
321 struct i596_rbd *v_next;
322 dma_addr_t b_addr; /* This rbd addr from i596 view */
323 unsigned char *v_data; /* Address from CPUs viewpoint */
324 /* Total 32 bytes... */
325 #ifdef __LP64__
326 u32 cache_pad[4];
327 #endif
330 /* These values as chosen so struct i596_private fits in one page... */
332 #define TX_RING_SIZE 32
333 #define RX_RING_SIZE 16
335 struct i596_scb {
336 unsigned short status;
337 unsigned short command;
338 dma_addr_t cmd;
339 dma_addr_t rfd;
340 u32 crc_err;
341 u32 align_err;
342 u32 resource_err;
343 u32 over_err;
344 u32 rcvdt_err;
345 u32 short_err;
346 unsigned short t_on;
347 unsigned short t_off;
350 struct i596_iscp {
351 u32 stat;
352 dma_addr_t scb;
355 struct i596_scp {
356 u32 sysbus;
357 u32 pad;
358 dma_addr_t iscp;
361 struct i596_private {
362 volatile struct i596_scp scp __attribute__((aligned(32)));
363 volatile struct i596_iscp iscp __attribute__((aligned(32)));
364 volatile struct i596_scb scb __attribute__((aligned(32)));
365 struct sa_cmd sa_cmd __attribute__((aligned(32)));
366 struct cf_cmd cf_cmd __attribute__((aligned(32)));
367 struct tdr_cmd tdr_cmd __attribute__((aligned(32)));
368 struct mc_cmd mc_cmd __attribute__((aligned(32)));
369 struct i596_rfd rfds[RX_RING_SIZE] __attribute__((aligned(32)));
370 struct i596_rbd rbds[RX_RING_SIZE] __attribute__((aligned(32)));
371 struct tx_cmd tx_cmds[TX_RING_SIZE] __attribute__((aligned(32)));
372 struct i596_tbd tbds[TX_RING_SIZE] __attribute__((aligned(32)));
373 u32 stat;
374 int last_restart;
375 struct i596_rfd *rfd_head;
376 struct i596_rbd *rbd_head;
377 struct i596_cmd *cmd_tail;
378 struct i596_cmd *cmd_head;
379 int cmd_backlog;
380 u32 last_cmd;
381 struct net_device_stats stats;
382 int next_tx_cmd;
383 int options;
384 spinlock_t lock;
385 dma_addr_t dma_addr;
386 struct device *dev;
/* Configure-command parameter bytes (first 14 are copied into cf_cmd). */
static char init_setup[] =
{
	0x8E,		/* length, prefetch on */
	0xC8,		/* fifo to 8, monitor off */
	0x80,		/* don't save bad frames */
	0x2E,		/* No source address insertion, 8 byte preamble */
	0x00,		/* priority and backoff defaults */
	0x60,		/* interframe spacing */
	0x00,		/* slot time LSB */
	0xf2,		/* slot time and retries */
	0x00,		/* promiscuous mode */
	0x00,		/* collision detect */
	0x40,		/* minimum frame length */
	0xff,
	0x00,
	0x7f /*  *multi IA */ };
406 static int i596_open(struct net_device *dev);
407 static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
408 static irqreturn_t i596_interrupt(int irq, void *dev_id, struct pt_regs *regs);
409 static int i596_close(struct net_device *dev);
410 static struct net_device_stats *i596_get_stats(struct net_device *dev);
411 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
412 static void i596_tx_timeout (struct net_device *dev);
413 static void print_eth(unsigned char *buf, char *str);
414 static void set_multicast_list(struct net_device *dev);
416 static int rx_ring_size = RX_RING_SIZE;
417 static int ticks_limit = 100;
418 static int max_cmd_backlog = TX_RING_SIZE-1;
421 static inline void CA(struct net_device *dev)
423 gsc_writel(0, dev->base_addr + PA_CHANNEL_ATTENTION);
427 static inline void MPU_PORT(struct net_device *dev, int c, dma_addr_t x)
429 struct i596_private *lp = (struct i596_private *) dev->priv;
431 u32 v = (u32) (c) | (u32) (x);
432 u16 a, b;
434 if (lp->options & OPT_SWAP_PORT) {
435 a = v >> 16;
436 b = v & 0xffff;
437 } else {
438 a = v & 0xffff;
439 b = v >> 16;
442 gsc_writel(a, dev->base_addr + PA_CPU_PORT_L_ACCESS);
443 udelay(1);
444 gsc_writel(b, dev->base_addr + PA_CPU_PORT_L_ACCESS);
448 static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
450 CHECK_INV(&(lp->iscp), sizeof(struct i596_iscp));
451 while (--delcnt && lp->iscp.stat) {
452 udelay(10);
453 CHECK_INV(&(lp->iscp), sizeof(struct i596_iscp));
455 if (!delcnt) {
456 printk("%s: %s, iscp.stat %04x, didn't clear\n",
457 dev->name, str, lp->iscp.stat);
458 return -1;
460 else
461 return 0;
465 static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
467 CHECK_INV(&(lp->scb), sizeof(struct i596_scb));
468 while (--delcnt && lp->scb.command) {
469 udelay(10);
470 CHECK_INV(&(lp->scb), sizeof(struct i596_scb));
472 if (!delcnt) {
473 printk("%s: %s, status %4.4x, cmd %4.4x.\n",
474 dev->name, str, lp->scb.status, lp->scb.command);
475 return -1;
477 else
478 return 0;
482 static void i596_display_data(struct net_device *dev)
484 struct i596_private *lp = (struct i596_private *) dev->priv;
485 struct i596_cmd *cmd;
486 struct i596_rfd *rfd;
487 struct i596_rbd *rbd;
489 printk("lp and scp at %p, .sysbus = %08x, .iscp = %08x\n",
490 &lp->scp, lp->scp.sysbus, lp->scp.iscp);
491 printk("iscp at %p, iscp.stat = %08x, .scb = %08x\n",
492 &lp->iscp, lp->iscp.stat, lp->iscp.scb);
493 printk("scb at %p, scb.status = %04x, .command = %04x,"
494 " .cmd = %08x, .rfd = %08x\n",
495 &lp->scb, lp->scb.status, lp->scb.command,
496 lp->scb.cmd, lp->scb.rfd);
497 printk(" errors: crc %x, align %x, resource %x,"
498 " over %x, rcvdt %x, short %x\n",
499 lp->scb.crc_err, lp->scb.align_err, lp->scb.resource_err,
500 lp->scb.over_err, lp->scb.rcvdt_err, lp->scb.short_err);
501 cmd = lp->cmd_head;
502 while (cmd != NULL) {
503 printk("cmd at %p, .status = %04x, .command = %04x, .b_next = %08x\n",
504 cmd, cmd->status, cmd->command, cmd->b_next);
505 cmd = cmd->v_next;
507 rfd = lp->rfd_head;
508 printk("rfd_head = %p\n", rfd);
509 do {
510 printk (" %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
511 " count %04x\n",
512 rfd, rfd->stat, rfd->cmd, rfd->b_next, rfd->rbd,
513 rfd->count);
514 rfd = rfd->v_next;
515 } while (rfd != lp->rfd_head);
516 rbd = lp->rbd_head;
517 printk("rbd_head = %p\n", rbd);
518 do {
519 printk(" %p .count %04x, b_next %08x, b_data %08x, size %04x\n",
520 rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size);
521 rbd = rbd->v_next;
522 } while (rbd != lp->rbd_head);
523 CHECK_INV(lp, sizeof(struct i596_private));
527 #if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
528 static void i596_error(int irq, void *dev_id, struct pt_regs *regs)
530 struct net_device *dev = dev_id;
531 volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
533 pcc2[0x28] = 1;
534 pcc2[0x2b] = 0x1d;
535 printk("%s: Error interrupt\n", dev->name);
536 i596_display_data(dev);
538 #endif
540 #define virt_to_dma(lp,v) ((lp)->dma_addr + (dma_addr_t)((unsigned long)(v)-(unsigned long)(lp)))
542 static inline void init_rx_bufs(struct net_device *dev)
544 struct i596_private *lp = (struct i596_private *)dev->priv;
545 int i;
546 struct i596_rfd *rfd;
547 struct i596_rbd *rbd;
549 /* First build the Receive Buffer Descriptor List */
551 for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
552 dma_addr_t dma_addr;
553 struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ + 4);
555 if (skb == NULL)
556 panic("82596: alloc_skb() failed");
557 skb_reserve(skb, 2);
558 dma_addr = dma_map_single(lp->dev, skb->tail,PKT_BUF_SZ,
559 DMA_FROM_DEVICE);
560 skb->dev = dev;
561 rbd->v_next = rbd+1;
562 rbd->b_next = WSWAPrbd(virt_to_dma(lp,rbd+1));
563 rbd->b_addr = WSWAPrbd(virt_to_dma(lp,rbd));
564 rbd->skb = skb;
565 rbd->v_data = skb->tail;
566 rbd->b_data = WSWAPchar(dma_addr);
567 rbd->size = PKT_BUF_SZ;
569 lp->rbd_head = lp->rbds;
570 rbd = lp->rbds + rx_ring_size - 1;
571 rbd->v_next = lp->rbds;
572 rbd->b_next = WSWAPrbd(virt_to_dma(lp,lp->rbds));
574 /* Now build the Receive Frame Descriptor List */
576 for (i = 0, rfd = lp->rfds; i < rx_ring_size; i++, rfd++) {
577 rfd->rbd = I596_NULL;
578 rfd->v_next = rfd+1;
579 rfd->v_prev = rfd-1;
580 rfd->b_next = WSWAPrfd(virt_to_dma(lp,rfd+1));
581 rfd->cmd = CMD_FLEX;
583 lp->rfd_head = lp->rfds;
584 lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
585 rfd = lp->rfds;
586 rfd->rbd = WSWAPrbd(virt_to_dma(lp,lp->rbd_head));
587 rfd->v_prev = lp->rfds + rx_ring_size - 1;
588 rfd = lp->rfds + rx_ring_size - 1;
589 rfd->v_next = lp->rfds;
590 rfd->b_next = WSWAPrfd(virt_to_dma(lp,lp->rfds));
591 rfd->cmd = CMD_EOL|CMD_FLEX;
593 CHECK_WBACK_INV(lp, sizeof(struct i596_private));
596 static inline void remove_rx_bufs(struct net_device *dev)
598 struct i596_private *lp = (struct i596_private *)dev->priv;
599 struct i596_rbd *rbd;
600 int i;
602 for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
603 if (rbd->skb == NULL)
604 break;
605 dma_unmap_single(lp->dev,
606 (dma_addr_t)WSWAPchar(rbd->b_data),
607 PKT_BUF_SZ, DMA_FROM_DEVICE);
608 dev_kfree_skb(rbd->skb);
613 static void rebuild_rx_bufs(struct net_device *dev)
615 struct i596_private *lp = (struct i596_private *) dev->priv;
616 int i;
618 /* Ensure rx frame/buffer descriptors are tidy */
620 for (i = 0; i < rx_ring_size; i++) {
621 lp->rfds[i].rbd = I596_NULL;
622 lp->rfds[i].cmd = CMD_FLEX;
624 lp->rfds[rx_ring_size-1].cmd = CMD_EOL|CMD_FLEX;
625 lp->rfd_head = lp->rfds;
626 lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
627 lp->rbd_head = lp->rbds;
628 lp->rfds[0].rbd = WSWAPrbd(virt_to_dma(lp,lp->rbds));
630 CHECK_WBACK_INV(lp, sizeof(struct i596_private));
634 static int init_i596_mem(struct net_device *dev)
636 struct i596_private *lp = (struct i596_private *) dev->priv;
637 unsigned long flags;
639 disable_irq(dev->irq); /* disable IRQs from LAN */
640 DEB(DEB_INIT,
641 printk("RESET 82596 port: %08lX (with IRQ%d disabled)\n",
642 dev->base_addr + PA_I82596_RESET,
643 dev->irq));
645 gsc_writel(0, (void*)(dev->base_addr + PA_I82596_RESET)); /* Hard Reset */
646 udelay(100); /* Wait 100us - seems to help */
648 /* change the scp address */
650 lp->last_cmd = jiffies;
653 lp->scp.sysbus = 0x0000006c;
654 lp->scp.iscp = WSWAPiscp(virt_to_dma(lp,&(lp->iscp)));
655 lp->iscp.scb = WSWAPscb(virt_to_dma(lp,&(lp->scb)));
656 lp->iscp.stat = ISCP_BUSY;
657 lp->cmd_backlog = 0;
659 lp->cmd_head = NULL;
660 lp->scb.cmd = I596_NULL;
662 DEB(DEB_INIT,printk("%s: starting i82596.\n", dev->name));
664 CHECK_WBACK(&(lp->scp), sizeof(struct i596_scp));
665 CHECK_WBACK(&(lp->iscp), sizeof(struct i596_iscp));
667 MPU_PORT(dev, PORT_ALTSCP, virt_to_dma(lp,&lp->scp));
669 CA(dev);
671 if (wait_istat(dev,lp,1000,"initialization timed out"))
672 goto failed;
673 DEB(DEB_INIT,printk("%s: i82596 initialization successful\n", dev->name));
675 /* Ensure rx frame/buffer descriptors are tidy */
676 rebuild_rx_bufs(dev);
678 lp->scb.command = 0;
679 CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
681 enable_irq(dev->irq); /* enable IRQs from LAN */
683 DEB(DEB_INIT,printk("%s: queuing CmdConfigure\n", dev->name));
684 memcpy(lp->cf_cmd.i596_config, init_setup, 14);
685 lp->cf_cmd.cmd.command = CmdConfigure;
686 CHECK_WBACK(&(lp->cf_cmd), sizeof(struct cf_cmd));
687 i596_add_cmd(dev, &lp->cf_cmd.cmd);
689 DEB(DEB_INIT,printk("%s: queuing CmdSASetup\n", dev->name));
690 memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
691 lp->sa_cmd.cmd.command = CmdSASetup;
692 CHECK_WBACK(&(lp->sa_cmd), sizeof(struct sa_cmd));
693 i596_add_cmd(dev, &lp->sa_cmd.cmd);
695 DEB(DEB_INIT,printk("%s: queuing CmdTDR\n", dev->name));
696 lp->tdr_cmd.cmd.command = CmdTDR;
697 CHECK_WBACK(&(lp->tdr_cmd), sizeof(struct tdr_cmd));
698 i596_add_cmd(dev, &lp->tdr_cmd.cmd);
700 spin_lock_irqsave (&lp->lock, flags);
702 if (wait_cmd(dev,lp,1000,"timed out waiting to issue RX_START")) {
703 spin_unlock_irqrestore (&lp->lock, flags);
704 goto failed;
706 DEB(DEB_INIT,printk("%s: Issuing RX_START\n", dev->name));
707 lp->scb.command = RX_START;
708 lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
709 CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
711 CA(dev);
713 spin_unlock_irqrestore (&lp->lock, flags);
715 if (wait_cmd(dev,lp,1000,"RX_START not processed"))
716 goto failed;
717 DEB(DEB_INIT,printk("%s: Receive unit started OK\n", dev->name));
719 return 0;
721 failed:
722 printk("%s: Failed to initialise 82596\n", dev->name);
723 MPU_PORT(dev, PORT_RESET, 0);
724 return -1;
728 static inline int i596_rx(struct net_device *dev)
730 struct i596_private *lp = (struct i596_private *)dev->priv;
731 struct i596_rfd *rfd;
732 struct i596_rbd *rbd;
733 int frames = 0;
735 DEB(DEB_RXFRAME,printk ("i596_rx(), rfd_head %p, rbd_head %p\n",
736 lp->rfd_head, lp->rbd_head));
739 rfd = lp->rfd_head; /* Ref next frame to check */
741 CHECK_INV(rfd, sizeof(struct i596_rfd));
742 while ((rfd->stat) & STAT_C) { /* Loop while complete frames */
743 if (rfd->rbd == I596_NULL)
744 rbd = NULL;
745 else if (rfd->rbd == lp->rbd_head->b_addr) {
746 rbd = lp->rbd_head;
747 CHECK_INV(rbd, sizeof(struct i596_rbd));
749 else {
750 printk("%s: rbd chain broken!\n", dev->name);
751 /* XXX Now what? */
752 rbd = NULL;
754 DEB(DEB_RXFRAME, printk(" rfd %p, rfd.rbd %08x, rfd.stat %04x\n",
755 rfd, rfd->rbd, rfd->stat));
757 if (rbd != NULL && ((rfd->stat) & STAT_OK)) {
758 /* a good frame */
759 int pkt_len = rbd->count & 0x3fff;
760 struct sk_buff *skb = rbd->skb;
761 int rx_in_place = 0;
763 DEB(DEB_RXADDR,print_eth(rbd->v_data, "received"));
764 frames++;
766 /* Check if the packet is long enough to just accept
767 * without copying to a properly sized skbuff.
770 if (pkt_len > rx_copybreak) {
771 struct sk_buff *newskb;
772 dma_addr_t dma_addr;
774 dma_unmap_single(lp->dev,(dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, DMA_FROM_DEVICE);
775 /* Get fresh skbuff to replace filled one. */
776 newskb = dev_alloc_skb(PKT_BUF_SZ + 4);
777 if (newskb == NULL) {
778 skb = NULL; /* drop pkt */
779 goto memory_squeeze;
781 skb_reserve(newskb, 2);
783 /* Pass up the skb already on the Rx ring. */
784 skb_put(skb, pkt_len);
785 rx_in_place = 1;
786 rbd->skb = newskb;
787 newskb->dev = dev;
788 dma_addr = dma_map_single(lp->dev, newskb->tail, PKT_BUF_SZ, DMA_FROM_DEVICE);
789 rbd->v_data = newskb->tail;
790 rbd->b_data = WSWAPchar(dma_addr);
791 CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd));
793 else
794 skb = dev_alloc_skb(pkt_len + 2);
795 memory_squeeze:
796 if (skb == NULL) {
797 /* XXX tulip.c can defer packets here!! */
798 printk ("%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
799 lp->stats.rx_dropped++;
801 else {
802 skb->dev = dev;
803 if (!rx_in_place) {
804 /* 16 byte align the data fields */
805 dma_sync_single(lp->dev, (dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, DMA_FROM_DEVICE);
806 skb_reserve(skb, 2);
807 memcpy(skb_put(skb,pkt_len), rbd->v_data, pkt_len);
809 skb->len = pkt_len;
810 skb->protocol=eth_type_trans(skb,dev);
811 netif_rx(skb);
812 dev->last_rx = jiffies;
813 lp->stats.rx_packets++;
814 lp->stats.rx_bytes+=pkt_len;
817 else {
818 DEB(DEB_ERRORS, printk("%s: Error, rfd.stat = 0x%04x\n",
819 dev->name, rfd->stat));
820 lp->stats.rx_errors++;
821 if ((rfd->stat) & 0x0001)
822 lp->stats.collisions++;
823 if ((rfd->stat) & 0x0080)
824 lp->stats.rx_length_errors++;
825 if ((rfd->stat) & 0x0100)
826 lp->stats.rx_over_errors++;
827 if ((rfd->stat) & 0x0200)
828 lp->stats.rx_fifo_errors++;
829 if ((rfd->stat) & 0x0400)
830 lp->stats.rx_frame_errors++;
831 if ((rfd->stat) & 0x0800)
832 lp->stats.rx_crc_errors++;
833 if ((rfd->stat) & 0x1000)
834 lp->stats.rx_length_errors++;
837 /* Clear the buffer descriptor count and EOF + F flags */
839 if (rbd != NULL && (rbd->count & 0x4000)) {
840 rbd->count = 0;
841 lp->rbd_head = rbd->v_next;
842 CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd));
845 /* Tidy the frame descriptor, marking it as end of list */
847 rfd->rbd = I596_NULL;
848 rfd->stat = 0;
849 rfd->cmd = CMD_EOL|CMD_FLEX;
850 rfd->count = 0;
852 /* Remove end-of-list from old end descriptor */
854 rfd->v_prev->cmd = CMD_FLEX;
856 /* Update record of next frame descriptor to process */
858 lp->scb.rfd = rfd->b_next;
859 lp->rfd_head = rfd->v_next;
860 CHECK_WBACK_INV(rfd->v_prev, sizeof(struct i596_rfd));
861 CHECK_WBACK_INV(rfd, sizeof(struct i596_rfd));
862 rfd = lp->rfd_head;
863 CHECK_INV(rfd, sizeof(struct i596_rfd));
866 DEB(DEB_RXFRAME,printk ("frames %d\n", frames));
868 return 0;
872 static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
874 struct i596_cmd *ptr;
876 while (lp->cmd_head != NULL) {
877 ptr = lp->cmd_head;
878 lp->cmd_head = ptr->v_next;
879 lp->cmd_backlog--;
881 switch ((ptr->command) & 0x7) {
882 case CmdTx:
884 struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
885 struct sk_buff *skb = tx_cmd->skb;
886 dma_unmap_single(lp->dev, tx_cmd->dma_addr, skb->len, DMA_TO_DEVICE);
888 dev_kfree_skb(skb);
890 lp->stats.tx_errors++;
891 lp->stats.tx_aborted_errors++;
893 ptr->v_next = NULL;
894 ptr->b_next = I596_NULL;
895 tx_cmd->cmd.command = 0; /* Mark as free */
896 break;
898 default:
899 ptr->v_next = NULL;
900 ptr->b_next = I596_NULL;
902 CHECK_WBACK_INV(ptr, sizeof(struct i596_cmd));
905 wait_cmd(dev,lp,100,"i596_cleanup_cmd timed out");
906 lp->scb.cmd = I596_NULL;
907 CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
911 static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
913 unsigned long flags;
915 DEB(DEB_RESET,printk("i596_reset\n"));
917 spin_lock_irqsave (&lp->lock, flags);
919 wait_cmd(dev,lp,100,"i596_reset timed out");
921 netif_stop_queue(dev);
923 /* FIXME: this command might cause an lpmc */
924 lp->scb.command = CUC_ABORT | RX_ABORT;
925 CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
926 CA(dev);
928 /* wait for shutdown */
929 wait_cmd(dev,lp,1000,"i596_reset 2 timed out");
930 spin_unlock_irqrestore (&lp->lock, flags);
932 i596_cleanup_cmd(dev,lp);
933 i596_rx(dev);
935 netif_start_queue(dev);
936 init_i596_mem(dev);
940 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
942 struct i596_private *lp = (struct i596_private *) dev->priv;
943 unsigned long flags;
945 DEB(DEB_ADDCMD,printk("i596_add_cmd cmd_head %p\n", lp->cmd_head));
947 cmd->status = 0;
948 cmd->command |= (CMD_EOL | CMD_INTR);
949 cmd->v_next = NULL;
950 cmd->b_next = I596_NULL;
951 CHECK_WBACK(cmd, sizeof(struct i596_cmd));
953 spin_lock_irqsave (&lp->lock, flags);
955 if (lp->cmd_head != NULL) {
956 lp->cmd_tail->v_next = cmd;
957 lp->cmd_tail->b_next = WSWAPcmd(virt_to_dma(lp,&cmd->status));
958 CHECK_WBACK(lp->cmd_tail, sizeof(struct i596_cmd));
959 } else {
960 lp->cmd_head = cmd;
961 wait_cmd(dev,lp,100,"i596_add_cmd timed out");
962 lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&cmd->status));
963 lp->scb.command = CUC_START;
964 CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
965 CA(dev);
967 lp->cmd_tail = cmd;
968 lp->cmd_backlog++;
970 spin_unlock_irqrestore (&lp->lock, flags);
972 if (lp->cmd_backlog > max_cmd_backlog) {
973 unsigned long tickssofar = jiffies - lp->last_cmd;
975 if (tickssofar < ticks_limit)
976 return;
978 printk("%s: command unit timed out, status resetting.\n", dev->name);
979 #if 1
980 i596_reset(dev, lp);
981 #endif
#if 0
/* this function makes a perfectly adequate probe...  but we have a
   device list */
/* Self-test probe: hand the chip the scp page via a PORT command and
 * poll for it to write a result word back. Disabled (#if 0) because
 * the driver probes from the device list instead. */
static int i596_test(struct net_device *dev)
{
	struct i596_private *lp = (struct i596_private *) dev->priv;
	volatile int *tint;
	u32 data;

	tint = (volatile int *)(&(lp->scp));
	data = virt_to_dma(lp,tint);

	tint[1] = -1;
	CHECK_WBACK(tint,PAGE_SIZE);

	MPU_PORT(dev, 1, data);

	for(data = 1000000; data; data--) {
		CHECK_INV(tint,PAGE_SIZE);
		if(tint[1] != -1)
			break;
	}

	printk("i596_test result %d\n", tint[1]);

	return 0;	/* NOTE(review): return value reconstructed — dead code under #if 0 */
}
#endif
1015 static int i596_open(struct net_device *dev)
1017 DEB(DEB_OPEN,printk("%s: i596_open() irq %d.\n", dev->name, dev->irq));
1019 MOD_INC_USE_COUNT;
1021 if (request_irq(dev->irq, &i596_interrupt, 0, "i82596", dev)) {
1022 printk("%s: IRQ %d not free\n", dev->name, dev->irq);
1023 goto out;
1026 init_rx_bufs(dev);
1028 if (init_i596_mem(dev)) {
1029 printk("%s: Failed to init memory\n", dev->name);
1030 goto out_remove_rx_bufs;
1033 netif_start_queue(dev);
1035 return 0;
1037 out_remove_rx_bufs:
1038 remove_rx_bufs(dev);
1039 free_irq(dev->irq, dev);
1040 out:
1041 MOD_DEC_USE_COUNT;
1043 return -EAGAIN;
1046 static void i596_tx_timeout (struct net_device *dev)
1048 struct i596_private *lp = (struct i596_private *) dev->priv;
1050 /* Transmitter timeout, serious problems. */
1051 DEB(DEB_ERRORS,printk("%s: transmit timed out, status resetting.\n",
1052 dev->name));
1054 lp->stats.tx_errors++;
1056 /* Try to restart the adaptor */
1057 if (lp->last_restart == lp->stats.tx_packets) {
1058 DEB(DEB_ERRORS,printk ("Resetting board.\n"));
1059 /* Shutdown and restart */
1060 i596_reset (dev, lp);
1061 } else {
1062 /* Issue a channel attention signal */
1063 DEB(DEB_ERRORS,printk ("Kicking board.\n"));
1064 lp->scb.command = CUC_START | RX_START;
1065 CHECK_WBACK_INV(&(lp->scb), sizeof(struct i596_scb));
1066 CA (dev);
1067 lp->last_restart = lp->stats.tx_packets;
1070 dev->trans_start = jiffies;
1071 netif_wake_queue (dev);
1075 static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
1077 struct i596_private *lp = (struct i596_private *) dev->priv;
1078 struct tx_cmd *tx_cmd;
1079 struct i596_tbd *tbd;
1080 short length = skb->len;
1081 dev->trans_start = jiffies;
1083 DEB(DEB_STARTTX,printk("%s: i596_start_xmit(%x,%p) called\n", dev->name,
1084 skb->len, skb->data));
1086 if (length < ETH_ZLEN) {
1087 skb = skb_padto(skb, ETH_ZLEN);
1088 if (skb == NULL)
1089 return 0;
1090 length = ETH_ZLEN;
1093 netif_stop_queue(dev);
1095 tx_cmd = lp->tx_cmds + lp->next_tx_cmd;
1096 tbd = lp->tbds + lp->next_tx_cmd;
1098 if (tx_cmd->cmd.command) {
1099 DEB(DEB_ERRORS,printk ("%s: xmit ring full, dropping packet.\n",
1100 dev->name));
1101 lp->stats.tx_dropped++;
1103 dev_kfree_skb(skb);
1104 } else {
1105 if (++lp->next_tx_cmd == TX_RING_SIZE)
1106 lp->next_tx_cmd = 0;
1107 tx_cmd->tbd = WSWAPtbd(virt_to_dma(lp,tbd));
1108 tbd->next = I596_NULL;
1110 tx_cmd->cmd.command = CMD_FLEX | CmdTx;
1111 tx_cmd->skb = skb;
1113 tx_cmd->pad = 0;
1114 tx_cmd->size = 0;
1115 tbd->pad = 0;
1116 tbd->size = EOF | length;
1118 tx_cmd->dma_addr = dma_map_single(lp->dev, skb->data, skb->len,
1119 DMA_TO_DEVICE);
1120 tbd->data = WSWAPchar(tx_cmd->dma_addr);
1122 DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued"));
1123 CHECK_WBACK_INV(tx_cmd, sizeof(struct tx_cmd));
1124 CHECK_WBACK_INV(tbd, sizeof(struct i596_tbd));
1125 i596_add_cmd(dev, &tx_cmd->cmd);
1127 lp->stats.tx_packets++;
1128 lp->stats.tx_bytes += length;
1131 netif_start_queue(dev);
1133 return 0;
/* Debug helper: dump an Ethernet header found at 'add' followed by the
 * tag string 'str'.  Prints source bytes (6..11), then "-->", then
 * destination bytes (0..5), then the two ethertype bytes (12,13).
 */
static void print_eth(unsigned char *add, char *str)
{
	int octet;

	printk("i596 0x%p, ", add);
	for (octet = 0; octet < 6; octet++)
		printk(" %02X", add[octet + 6]);
	printk(" -->");
	for (octet = 0; octet < 6; octet++)
		printk(" %02X", add[octet]);
	printk(" %02X%02X, %s\n", add[12], add[13], str);
}
1150 #define LAN_PROM_ADDR 0xF0810000
1152 static int __devinit i82596_probe(struct net_device *dev)
1154 int i;
1155 struct i596_private *lp;
1156 /* we're going to overwrite dev->priv, so pull the device out */
1157 struct device *gen_dev = dev->priv;
1158 char eth_addr[6];
1159 dma_addr_t dma_addr;
1161 /* This lot is ensure things have been cache line aligned. */
1162 if (sizeof(struct i596_rfd) != 32) {
1163 printk("82596: sizeof(struct i596_rfd) = %d\n",
1164 sizeof(struct i596_rfd));
1165 return -ENODEV;
1167 if ((sizeof(struct i596_rbd) % 32) != 0) {
1168 printk("82596: sizeof(struct i596_rbd) = %d\n",
1169 sizeof(struct i596_rbd));
1170 return -ENODEV;
1172 if ((sizeof(struct tx_cmd) % 32) != 0) {
1173 printk("82596: sizeof(struct tx_cmd) = %d\n",
1174 sizeof(struct tx_cmd));
1175 return -ENODEV;
1177 if (sizeof(struct i596_tbd) != 32) {
1178 printk("82596: sizeof(struct i596_tbd) = %d\n",
1179 sizeof(struct i596_tbd));
1180 return -ENODEV;
1182 #ifndef __LP64__
1183 if (sizeof(struct i596_private) > 4096) {
1184 printk("82596: sizeof(struct i596_private) = %d\n",
1185 sizeof(struct i596_private));
1186 return -ENODEV;
1188 #endif
1190 if (!dev->base_addr || !dev->irq)
1191 return -ENODEV;
1193 if (pdc_lan_station_id(eth_addr, dev->base_addr)) {
1194 for (i=0; i < 6; i++) {
1195 eth_addr[i] = gsc_readb(LAN_PROM_ADDR + i);
1197 printk("82596.c: MAC of HP700 LAN read from EEPROM\n");
1200 dev->mem_start = (unsigned long) dma_alloc_noncoherent(gen_dev,
1201 sizeof(struct i596_private), &dma_addr, GFP_KERNEL);
1202 if (!dev->mem_start) {
1203 printk("%s: Couldn't get shared memory\n", dev->name);
1204 return -ENOMEM;
1207 ether_setup(dev);
1208 DEB(DEB_PROBE,printk("%s: 82596 at %#3lx,", dev->name, dev->base_addr));
1210 for (i = 0; i < 6; i++)
1211 DEB(DEB_PROBE,printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]));
1213 DEB(DEB_PROBE,printk(" IRQ %d.\n", dev->irq));
1215 DEB(DEB_PROBE,printk(version));
1217 /* The 82596-specific entries in the device structure. */
1218 dev->open = i596_open;
1219 dev->stop = i596_close;
1220 dev->hard_start_xmit = i596_start_xmit;
1221 dev->get_stats = i596_get_stats;
1222 dev->set_multicast_list = set_multicast_list;
1223 dev->tx_timeout = i596_tx_timeout;
1224 dev->watchdog_timeo = TX_TIMEOUT;
1226 dev->priv = (void *)(dev->mem_start);
1228 lp = (struct i596_private *) dev->priv;
1229 DEB(DEB_INIT,printk ("%s: lp at 0x%08lx (%d bytes), lp->scb at 0x%08lx\n",
1230 dev->name, (unsigned long)lp,
1231 sizeof(struct i596_private), (unsigned long)&lp->scb));
1232 memset(lp, 0, sizeof(struct i596_private));
1234 lp->scb.command = 0;
1235 lp->scb.cmd = I596_NULL;
1236 lp->scb.rfd = I596_NULL;
1237 lp->lock = SPIN_LOCK_UNLOCKED;
1238 lp->dma_addr = dma_addr;
1239 lp->dev = gen_dev;
1241 CHECK_WBACK_INV(dev->mem_start, sizeof(struct i596_private));
1243 return 0;
1247 static irqreturn_t i596_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1249 struct net_device *dev = dev_id;
1250 struct i596_private *lp;
1251 unsigned short status, ack_cmd = 0;
1253 if (dev == NULL) {
1254 printk("i596_interrupt(): irq %d for unknown device.\n", irq);
1255 return IRQ_NONE;
1258 lp = (struct i596_private *) dev->priv;
1260 spin_lock (&lp->lock);
1262 wait_cmd(dev,lp,100,"i596 interrupt, timeout");
1263 status = lp->scb.status;
1265 DEB(DEB_INTS,printk("%s: i596 interrupt, IRQ %d, status %4.4x.\n",
1266 dev->name, irq, status));
1268 ack_cmd = status & 0xf000;
1270 if (!ack_cmd) {
1271 DEB(DEB_ERRORS, printk("%s: interrupt with no events\n", dev->name));
1272 spin_unlock (&lp->lock);
1273 return IRQ_NONE;
1276 if ((status & 0x8000) || (status & 0x2000)) {
1277 struct i596_cmd *ptr;
1279 if ((status & 0x8000))
1280 DEB(DEB_INTS,printk("%s: i596 interrupt completed command.\n", dev->name));
1281 if ((status & 0x2000))
1282 DEB(DEB_INTS,printk("%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));
1284 while (lp->cmd_head != NULL) {
1285 CHECK_INV(lp->cmd_head, sizeof(struct i596_cmd));
1286 if (!(lp->cmd_head->status & STAT_C))
1287 break;
1289 ptr = lp->cmd_head;
1291 DEB(DEB_STATUS,printk("cmd_head->status = %04x, ->command = %04x\n",
1292 lp->cmd_head->status, lp->cmd_head->command));
1293 lp->cmd_head = ptr->v_next;
1294 lp->cmd_backlog--;
1296 switch ((ptr->command) & 0x7) {
1297 case CmdTx:
1299 struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
1300 struct sk_buff *skb = tx_cmd->skb;
1302 if ((ptr->status) & STAT_OK) {
1303 DEB(DEB_TXADDR,print_eth(skb->data, "tx-done"));
1304 } else {
1305 lp->stats.tx_errors++;
1306 if ((ptr->status) & 0x0020)
1307 lp->stats.collisions++;
1308 if (!((ptr->status) & 0x0040))
1309 lp->stats.tx_heartbeat_errors++;
1310 if ((ptr->status) & 0x0400)
1311 lp->stats.tx_carrier_errors++;
1312 if ((ptr->status) & 0x0800)
1313 lp->stats.collisions++;
1314 if ((ptr->status) & 0x1000)
1315 lp->stats.tx_aborted_errors++;
1317 dma_unmap_single(lp->dev, tx_cmd->dma_addr, skb->len, DMA_TO_DEVICE);
1318 dev_kfree_skb_irq(skb);
1320 tx_cmd->cmd.command = 0; /* Mark free */
1321 break;
1323 case CmdTDR:
1325 unsigned short status = ((struct tdr_cmd *)ptr)->status;
1327 if (status & 0x8000) {
1328 DEB(DEB_ANY,printk("%s: link ok.\n", dev->name));
1329 } else {
1330 if (status & 0x4000)
1331 printk("%s: Transceiver problem.\n", dev->name);
1332 if (status & 0x2000)
1333 printk("%s: Termination problem.\n", dev->name);
1334 if (status & 0x1000)
1335 printk("%s: Short circuit.\n", dev->name);
1337 DEB(DEB_TDR,printk("%s: Time %d.\n", dev->name, status & 0x07ff));
1339 break;
1341 case CmdConfigure:
1342 /* Zap command so set_multicast_list() knows it is free */
1343 ptr->command = 0;
1344 break;
1346 ptr->v_next = NULL;
1347 ptr->b_next = I596_NULL;
1348 CHECK_WBACK(ptr, sizeof(struct i596_cmd));
1349 lp->last_cmd = jiffies;
1352 /* This mess is arranging that only the last of any outstanding
1353 * commands has the interrupt bit set. Should probably really
1354 * only add to the cmd queue when the CU is stopped.
1356 ptr = lp->cmd_head;
1357 while ((ptr != NULL) && (ptr != lp->cmd_tail)) {
1358 struct i596_cmd *prev = ptr;
1360 ptr->command &= 0x1fff;
1361 ptr = ptr->v_next;
1362 CHECK_WBACK_INV(prev, sizeof(struct i596_cmd));
1365 if ((lp->cmd_head != NULL))
1366 ack_cmd |= CUC_START;
1367 lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&lp->cmd_head->status));
1368 CHECK_WBACK_INV(&lp->scb, sizeof(struct i596_scb));
1370 if ((status & 0x1000) || (status & 0x4000)) {
1371 if ((status & 0x4000))
1372 DEB(DEB_INTS,printk("%s: i596 interrupt received a frame.\n", dev->name));
1373 i596_rx(dev);
1374 /* Only RX_START if stopped - RGH 07-07-96 */
1375 if (status & 0x1000) {
1376 if (netif_running(dev)) {
1377 DEB(DEB_ERRORS,printk("%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status));
1378 ack_cmd |= RX_START;
1379 lp->stats.rx_errors++;
1380 lp->stats.rx_fifo_errors++;
1381 rebuild_rx_bufs(dev);
1385 wait_cmd(dev,lp,100,"i596 interrupt, timeout");
1386 lp->scb.command = ack_cmd;
1387 CHECK_WBACK(&lp->scb, sizeof(struct i596_scb));
1389 /* DANGER: I suspect that some kind of interrupt
1390 acknowledgement aside from acking the 82596 might be needed
1391 here... but it's running acceptably without */
1393 CA(dev);
1395 wait_cmd(dev,lp,100,"i596 interrupt, exit timeout");
1396 DEB(DEB_INTS,printk("%s: exiting interrupt.\n", dev->name));
1398 spin_unlock (&lp->lock);
1399 return IRQ_HANDLED;
1402 static int i596_close(struct net_device *dev)
1404 struct i596_private *lp = (struct i596_private *) dev->priv;
1405 unsigned long flags;
1407 netif_stop_queue(dev);
1409 DEB(DEB_INIT,printk("%s: Shutting down ethercard, status was %4.4x.\n",
1410 dev->name, lp->scb.status));
1412 spin_lock_irqsave(&lp->lock, flags);
1414 wait_cmd(dev,lp,100,"close1 timed out");
1415 lp->scb.command = CUC_ABORT | RX_ABORT;
1416 CHECK_WBACK(&lp->scb, sizeof(struct i596_scb));
1418 CA(dev);
1420 wait_cmd(dev,lp,100,"close2 timed out");
1421 spin_unlock_irqrestore(&lp->lock, flags);
1422 DEB(DEB_STRUCT,i596_display_data(dev));
1423 i596_cleanup_cmd(dev,lp);
1425 disable_irq(dev->irq);
1427 free_irq(dev->irq, dev);
1428 remove_rx_bufs(dev);
1430 MOD_DEC_USE_COUNT;
1432 return 0;
1435 static struct net_device_stats *
1436 i596_get_stats(struct net_device *dev)
1438 struct i596_private *lp = (struct i596_private *) dev->priv;
1440 return &lp->stats;
1444 * Set or clear the multicast filter for this adaptor.
1447 static void set_multicast_list(struct net_device *dev)
1449 struct i596_private *lp = (struct i596_private *) dev->priv;
1450 int config = 0, cnt;
1452 DEB(DEB_MULTI,printk("%s: set multicast list, %d entries, promisc %s, allmulti %s\n", dev->name, dev->mc_count, dev->flags & IFF_PROMISC ? "ON" : "OFF", dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
1454 if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) {
1455 lp->cf_cmd.i596_config[8] |= 0x01;
1456 config = 1;
1458 if (!(dev->flags & IFF_PROMISC) && (lp->cf_cmd.i596_config[8] & 0x01)) {
1459 lp->cf_cmd.i596_config[8] &= ~0x01;
1460 config = 1;
1462 if ((dev->flags & IFF_ALLMULTI) && (lp->cf_cmd.i596_config[11] & 0x20)) {
1463 lp->cf_cmd.i596_config[11] &= ~0x20;
1464 config = 1;
1466 if (!(dev->flags & IFF_ALLMULTI) && !(lp->cf_cmd.i596_config[11] & 0x20)) {
1467 lp->cf_cmd.i596_config[11] |= 0x20;
1468 config = 1;
1470 if (config) {
1471 if (lp->cf_cmd.cmd.command)
1472 printk("%s: config change request already queued\n",
1473 dev->name);
1474 else {
1475 lp->cf_cmd.cmd.command = CmdConfigure;
1476 CHECK_WBACK_INV(&lp->cf_cmd, sizeof(struct cf_cmd));
1477 i596_add_cmd(dev, &lp->cf_cmd.cmd);
1481 cnt = dev->mc_count;
1482 if (cnt > MAX_MC_CNT)
1484 cnt = MAX_MC_CNT;
1485 printk("%s: Only %d multicast addresses supported",
1486 dev->name, cnt);
1489 if (dev->mc_count > 0) {
1490 struct dev_mc_list *dmi;
1491 unsigned char *cp;
1492 struct mc_cmd *cmd;
1494 cmd = &lp->mc_cmd;
1495 cmd->cmd.command = CmdMulticastList;
1496 cmd->mc_cnt = dev->mc_count * 6;
1497 cp = cmd->mc_addrs;
1498 for (dmi = dev->mc_list; cnt && dmi != NULL; dmi = dmi->next, cnt--, cp += 6) {
1499 memcpy(cp, dmi->dmi_addr, 6);
1500 if (i596_debug > 1)
1501 DEB(DEB_MULTI,printk("%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n",
1502 dev->name, cp[0],cp[1],cp[2],cp[3],cp[4],cp[5]));
1504 CHECK_WBACK_INV(&lp->mc_cmd, sizeof(struct mc_cmd));
1505 i596_add_cmd(dev, &cmd->cmd);
1509 MODULE_PARM(debug, "i");
1510 MODULE_PARM_DESC(debug, "lasi_82596 debug mask");
1511 static int debug = -1;
1513 static int num_drivers;
1514 static struct net_device *netdevs[MAX_DRIVERS];
1516 static int __devinit
1517 lan_init_chip(struct parisc_device *dev)
1519 struct net_device *netdevice;
1520 int retval;
1522 if (num_drivers >= MAX_DRIVERS) {
1523 /* max count of possible i82596 drivers reached */
1524 return -ENODEV;
1527 if (!dev->irq) {
1528 printk(KERN_ERR __FILE__ ": IRQ not found for i82596 at 0x%lx\n", dev->hpa);
1529 return -ENODEV;
1532 printk(KERN_INFO "Found i82596 at 0x%lx, IRQ %d\n", dev->hpa, dev->irq);
1534 netdevice = alloc_etherdev(0);
1535 if (!netdevice)
1536 return -ENOMEM;
1538 netdevice->base_addr = dev->hpa;
1539 netdevice->irq = dev->irq;
1540 netdevice->init = i82596_probe;
1541 netdevice->priv = &dev->dev;
1543 retval = register_netdev(netdevice);
1544 if (retval) {
1545 printk(KERN_WARNING __FILE__ ": register_netdevice ret'd %d\n", retval);
1546 kfree(netdevice);
1547 return -ENODEV;
1549 if (dev->id.sversion == 0x72) {
1550 ((struct i596_private *)netdevice->priv)->options = OPT_SWAP_PORT;
1553 netdevs[num_drivers++] = netdevice;
1555 return retval;
1559 static struct parisc_device_id lan_tbl[] = {
1560 { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008a },
1561 { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00072 },
1562 { 0, }
1565 MODULE_DEVICE_TABLE(parisc, lan_tbl);
1567 static struct parisc_driver lan_driver = {
1568 .name = "Apricot",
1569 .id_table = lan_tbl,
1570 .probe = lan_init_chip,
1573 static int __devinit lasi_82596_init(void)
1575 if (debug >= 0)
1576 i596_debug = debug;
1577 return register_parisc_driver(&lan_driver);
1580 module_init(lasi_82596_init);
1582 static void __exit lasi_82596_exit(void)
1584 int i;
1586 for (i=0; i<MAX_DRIVERS; i++) {
1587 struct i596_private *lp;
1588 struct net_device *netdevice;
1590 netdevice = netdevs[i];
1591 if (!netdevice)
1592 continue;
1594 unregister_netdev(netdevice);
1596 lp = (struct i596_private *) netdevice->priv;
1597 dma_free_noncoherent(lp->dev, sizeof(struct i596_private),
1598 (void *)netdevice->mem_start, lp->dma_addr);
1599 netdevice->priv = NULL;
1602 unregister_parisc_driver(&lan_driver);
1605 module_exit(lasi_82596_exit);