/* lasi_82596.c -- driver for the intel 82596 ethernet controller, as
   munged into HPPA boxen.

   This driver is based upon 82596.c, original credits are below...
   but there were too many hoops which HP wants jumped through to
   keep this code in there in a sane manner.

   3 primary sources of the mess --
   1) hppa needs *lots* of cacheline flushing to keep this kind of
   MMIO running.

   2) The 82596 needs to see all of its pointers as their physical
   address.  Thus virt_to_bus/bus_to_virt are *everywhere*.

   3) The implementation HP is using seems to be significantly pickier
   about when and how the command and RX units are started.  Some
   command ordering was changed.

   Examination of the mach driver leads one to believe that there
   might be a saner way to pull this off...  anyone who feels like a
   full rewrite can be my guest.

   Split 02/13/2000 Sam Creasey (sammy@oh.verio.com)

   02/01/2000  Initial modifications for parisc by Helge Deller (deller@gmx.de)
   03/02/2000  changes for better/correct(?) cache-flushing (deller)
*/
/* 82596.c: A generic 82596 ethernet driver for linux. */

/*
   Based on Apricot.c
   Written 1994 by Mark Evans.
   This driver is for the Apricot 82596 bus-master interface

   Modularised 12/94 Mark Evans

   Modified to support the 82596 ethernet chips on 680x0 VME boards.
   by Richard Hirst <richard@sleepie.demon.co.uk>
   Renamed to be 82596.c

   980825:  Changed to receive directly in to sk_buffs which are
   allocated at open() time.  Eliminates copy on incoming frames
   (small ones are still copied).  Shared data now held in a
   non-cached page, so we can run on 68060 in copyback mode.

   TBD:
   * look at deferring rx frames rather than discarding (as per tulip)
   * handle tx ring full as per tulip
   * performance test to tune rx_copybreak

   Most of my modifications relate to the braindead big-endian
   implementation by Intel.  When the i596 is operating in
   'big-endian' mode, it thinks a 32 bit value of 0x12345678
   should be stored as 0x56781234.  This is a real pain, when
   you have linked lists which are shared by the 680x0 and the
   i596.

   Driver skeleton
   Written 1993 by Donald Becker.
   Copyright 1993 United States Government as represented by the Director,
   National Security Agency.  This software may only be used and distributed
   according to the terms of the GNU Public License as modified by SRC,
   incorporated herein by reference.

   The author may be reached as becker@super.org or
   C/O Supercomputing Research Ctr., 17100 Science Dr., Bowie MD 20715
 */
static const char *version = "82596.c $Revision: 1.14 $\n";

#include <linux/config.h>
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/malloc.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/pci.h>

#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>

#include <asm/pdc.h>
#include <asm/gsc.h>
#include <asm/cache.h>

/* DEBUG flags
 */

#define DEB_INIT	0x0001
#define DEB_PROBE	0x0002
#define DEB_SERIOUS	0x0004
#define DEB_ERRORS	0x0008
#define DEB_MULTI	0x0010
#define DEB_TDR		0x0020
#define DEB_OPEN	0x0040
#define DEB_RESET	0x0080
#define DEB_ADDCMD	0x0100
#define DEB_STATUS	0x0200
#define DEB_STARTTX	0x0400
#define DEB_RXADDR	0x0800
#define DEB_TXADDR	0x1000
#define DEB_RXFRAME	0x2000
#define DEB_INTS	0x4000
#define DEB_STRUCT	0x8000
#define DEB_ANY		0xffff


#define DEB(x,y)	if (i596_debug & (x)) y
#define CHECK_WBACK(addr,len) \
	do { if (!dma_consistent) dma_cache_wback((unsigned long)addr,len); } while (0)

#define CHECK_INV(addr,len) \
	do { if (!dma_consistent) dma_cache_inv((unsigned long)addr,len); } while (0)

#define CHECK_WBACK_INV(addr,len) \
	do { if (!dma_consistent) dma_cache_wback_inv((unsigned long)addr,len); } while (0)
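/* Sketch of the convention used with these macros throughout this file
 * (illustrative, mirrors i596_add_cmd() and wait_cmd() below): write-back
 * before handing a structure to the chip, invalidate before reading back
 * what the chip wrote.
 *
 *	lp->scb.command = CUC_START;
 *	CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));   flush to memory
 *	CA(dev);                                            chip DMA-reads SCB
 *	...
 *	CHECK_INV(&(lp->scb), sizeof(struct i596_scb));     drop stale lines
 *	status = lp->scb.status;                            read chip's update
 */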
#define PA_I82596_RESET		0	/* Offsets relative to LASI-LAN-Addr.*/
#define PA_CPU_PORT_L_ACCESS	4
#define PA_CHANNEL_ATTENTION	8


/*
 * Define various macros for Channel Attention, word swapping etc., dependent
 * on architecture.  MVME and BVME are 680x0 based, otherwise it is Intel.
 */

#ifdef __BIG_ENDIAN
#define WSWAPrfd(x)  ((struct i596_rfd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPrbd(x)  ((struct i596_rbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPiscp(x) ((struct i596_iscp *)(((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPscb(x)  ((struct i596_scb *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPcmd(x)  ((struct i596_cmd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPtbd(x)  ((struct i596_tbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPchar(x) ((char *)            (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define ISCP_BUSY	0x00010000
#define MACH_IS_APRICOT	0
#else
#define WSWAPrfd(x)	((struct i596_rfd *)(x))
#define WSWAPrbd(x)	((struct i596_rbd *)(x))
#define WSWAPiscp(x)	((struct i596_iscp *)(x))
#define WSWAPscb(x)	((struct i596_scb *)(x))
#define WSWAPcmd(x)	((struct i596_cmd *)(x))
#define WSWAPtbd(x)	((struct i596_tbd *)(x))
#define WSWAPchar(x)	((char *)(x))
#define ISCP_BUSY	0x0001
#define MACH_IS_APRICOT	1
#endif
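/* Worked example of the swap (values illustrative): on a big-endian host,
 * WSWAPscb(0x12345678) evaluates to (struct i596_scb *)0x56781234 -- the two
 * 16-bit halves of the 32-bit pointer are exchanged, matching the
 * 0x12345678 -> 0x56781234 behaviour described in the header comment above.
 */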
/*
 * The MPU_PORT command allows direct access to the 82596.  With PORT access
 * the following commands are available (p5-18).  The 32-bit port command
 * must be word-swapped with the most significant word written first.
 * This only applies to VME boards.
 */
#define PORT_RESET		0x00	/* reset 82596 */
#define PORT_SELFTEST		0x01	/* selftest */
#define PORT_ALTSCP		0x02	/* alternate SCB address */
#define PORT_ALTDUMP		0x03	/* Alternate DUMP address */
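/* Sketch of how a port command is formed (based on the code below): the
 * command code is OR'd into the low bits of a bus address, e.g.
 *
 *	MPU_PORT(dev, PORT_ALTSCP, (void *)virt_to_dma(lp, &lp->scp));
 *
 * from init_i596_mem().  The SCP is declared 32-byte aligned, so its low
 * address bits are zero and free to carry the command code.
 */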
static int i596_debug = (DEB_SERIOUS|DEB_PROBE);

MODULE_AUTHOR("Richard Hirst");
MODULE_DESCRIPTION("i82596 driver");
MODULE_PARM(i596_debug, "i");


/* Copy frames shorter than rx_copybreak, otherwise pass on up in
 * a full sized sk_buff.  Value of 100 stolen from tulip.c (!alpha).
 */
static int rx_copybreak = 100;
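/* Example of the tradeoff (illustrative numbers): with rx_copybreak = 100,
 * a 60-byte ARP reply is memcpy'd into a freshly allocated right-sized skb
 * so the large ring buffer can be reused immediately, while a 1400-byte TCP
 * segment is passed up in the ring skb itself and a new PKT_BUF_SZ buffer
 * is mapped in its place -- see the pkt_len > rx_copybreak test in
 * i596_rx() below.
 */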
#define PKT_BUF_SZ	1536
#define MAX_MC_CNT	64

#define I596_TOTAL_SIZE 17

#define I596_NULL ((void *)0xffffffff)

#define CMD_EOL		0x8000	/* The last command of the list, stop. */
#define CMD_SUSP	0x4000	/* Suspend after doing cmd. */
#define CMD_INTR	0x2000	/* Interrupt after doing cmd. */

#define CMD_FLEX	0x0008	/* Enable flexible memory model */

enum commands {
	CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
	CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
};

#define STAT_C		0x8000	/* Set to 0 after execution */
#define STAT_B		0x4000	/* Command being executed */
#define STAT_OK		0x2000	/* Command executed ok */
#define STAT_A		0x1000	/* Command aborted */

#define	CUC_START	0x0100
#define	CUC_RESUME	0x0200
#define	CUC_SUSPEND	0x0300
#define	CUC_ABORT	0x0400
#define	RX_START	0x0010
#define	RX_RESUME	0x0020
#define	RX_SUSPEND	0x0030
#define	RX_ABORT	0x0040

#define TX_TIMEOUT	5

#define OPT_SWAP_PORT	0x0001	/* Need to wordswap on the MPU port */


struct i596_reg {
	unsigned short porthi;
	unsigned short portlo;
	unsigned long ca;
};

#define EOF		0x8000
#define SIZE_MASK	0x3fff

struct i596_tbd {
	unsigned short size;
	unsigned short pad;
	struct i596_tbd *next;
	char *data;
	long cache_pad[5];		/* Total 32 bytes... */
};
/* The command structure has two 'next' pointers; v_next is the address of
 * the next command as seen by the CPU, b_next is the address of the next
 * command as seen by the 82596.  The b_next pointer, as used by the 82596
 * always references the status field of the next command, rather than the
 * v_next field, because the 82596 is unaware of v_next.  It may seem more
 * logical to put v_next at the end of the structure, but we cannot do that
 * because the 82596 expects other fields to be there, depending on command
 * type.
 */
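/* Picture of the resulting dual-linked command queue (illustrative):
 *
 *   CPU view:  cmd_head --v_next--> cmd --v_next--> cmd --v_next--> I596_NULL
 *   chip view: scb.cmd --b_next--> &cmd->status --b_next--> ... (bus
 *              addresses, WSWAP'd and produced with virt_to_dma(); see
 *              i596_add_cmd())
 *
 * Both chains thread the same commands; only the address space differs.
 */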
struct i596_cmd {
	struct i596_cmd *v_next;	/* Address from CPUs viewpoint */
	unsigned short status;
	unsigned short command;
	struct i596_cmd *b_next;	/* Address from i596 viewpoint */
};

struct tx_cmd {
	struct i596_cmd cmd;
	struct i596_tbd *tbd;
	unsigned short size;
	unsigned short pad;
	struct sk_buff *skb;		/* So we can free it after tx */
	dma_addr_t dma_addr;
	long cache_pad[1];		/* Total 32 bytes... */
};

struct tdr_cmd {
	struct i596_cmd cmd;
	unsigned short status;
	unsigned short pad;
};

struct mc_cmd {
	struct i596_cmd cmd;
	short mc_cnt;
	char mc_addrs[MAX_MC_CNT*6];
};

struct sa_cmd {
	struct i596_cmd cmd;
	char eth_addr[8];
};

struct cf_cmd {
	struct i596_cmd cmd;
	char i596_config[16];
};

struct i596_rfd {
	unsigned short stat;
	unsigned short cmd;
	struct i596_rfd *b_next;	/* Address from i596 viewpoint */
	struct i596_rbd *rbd;
	unsigned short count;
	unsigned short size;
	struct i596_rfd *v_next;	/* Address from CPUs viewpoint */
	struct i596_rfd *v_prev;
	long cache_pad[2];		/* Total 32 bytes... */
};

struct i596_rbd {
	unsigned short count;
	unsigned short zero1;
	struct i596_rbd *b_next;
	unsigned char *b_data;		/* Address from i596 viewpoint */
	unsigned short size;
	unsigned short zero2;
	struct sk_buff *skb;
	struct i596_rbd *v_next;
	struct i596_rbd *b_addr;	/* This rbd addr from i596 view */
	unsigned char *v_data;		/* Address from CPUs viewpoint */
					/* Total 32 bytes... */
};
/* These values are chosen so struct i596_private fits in one page... */

#define TX_RING_SIZE 32
#define RX_RING_SIZE 16
struct i596_scb {
	unsigned short status;
	unsigned short command;
	struct i596_cmd *cmd;
	struct i596_rfd *rfd;
	unsigned long crc_err;
	unsigned long align_err;
	unsigned long resource_err;
	unsigned long over_err;
	unsigned long rcvdt_err;
	unsigned long short_err;
	unsigned short t_on;
	unsigned short t_off;
};

struct i596_iscp {
	unsigned long stat;
	struct i596_scb *scb;
};

struct i596_scp {
	unsigned long sysbus;
	unsigned long pad;
	struct i596_iscp *iscp;
};

struct i596_private {
	volatile struct i596_scp scp		__attribute__((aligned(32)));
	volatile struct i596_iscp iscp		__attribute__((aligned(32)));
	volatile struct i596_scb scb		__attribute__((aligned(32)));
	struct sa_cmd sa_cmd			__attribute__((aligned(32)));
	struct cf_cmd cf_cmd			__attribute__((aligned(32)));
	struct tdr_cmd tdr_cmd			__attribute__((aligned(32)));
	struct mc_cmd mc_cmd			__attribute__((aligned(32)));
	struct i596_rfd rfds[RX_RING_SIZE]	__attribute__((aligned(32)));
	struct i596_rbd rbds[RX_RING_SIZE]	__attribute__((aligned(32)));
	struct tx_cmd tx_cmds[TX_RING_SIZE]	__attribute__((aligned(32)));
	struct i596_tbd tbds[TX_RING_SIZE]	__attribute__((aligned(32)));
	unsigned long stat;
	int last_restart;
	struct i596_rfd *rfd_head;
	struct i596_rbd *rbd_head;
	struct i596_cmd *cmd_tail;
	struct i596_cmd *cmd_head;
	int cmd_backlog;
	unsigned long last_cmd;
	struct net_device_stats stats;
	int next_tx_cmd;
	int options;
	spinlock_t lock;
	dma_addr_t dma_addr;
};

char init_setup[] =
{
	0x8E,		/* length, prefetch on */
	0xC8,		/* fifo to 8, monitor off */
	0x80,		/* don't save bad frames */
	0x2E,		/* No source address insertion, 8 byte preamble */
	0x00,		/* priority and backoff defaults */
	0x60,		/* interframe spacing */
	0x00,		/* slot time LSB */
	0xf2,		/* slot time and retries */
	0x00,		/* promiscuous mode */
	0x00,		/* collision detect */
	0x40,		/* minimum frame length */
	0xff,
	0x00,
	0x7f /*  *multi IA */ };
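/* All 14 bytes above are copied into lp->cf_cmd.i596_config by
 * init_i596_mem() (byte 0's low nibble is the byte count).
 * set_multicast_list() later patches byte 8 (promiscuous bit 0x01) and
 * byte 11 (multicast-disable bit 0x20) in place before requeueing the
 * CmdConfigure command.
 */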
static int dma_consistent = 1;	/* Zero if pci_alloc_consistent() fails */
static int i596_open(struct net_device *dev);
static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void i596_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static int i596_close(struct net_device *dev);
static struct net_device_stats *i596_get_stats(struct net_device *dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
static void i596_tx_timeout (struct net_device *dev);
static void print_eth(unsigned char *buf, char *str);
static void set_multicast_list(struct net_device *dev);

static int rx_ring_size = RX_RING_SIZE;
static int ticks_limit = 100;
static int max_cmd_backlog = TX_RING_SIZE-1;


static inline void CA(struct net_device *dev)
{
	gsc_writel(0, (void*)(dev->base_addr + PA_CHANNEL_ATTENTION));
}


static inline void MPU_PORT(struct net_device *dev, int c, volatile void *x)
{
	struct i596_private *lp = (struct i596_private *) dev->priv;

	u32 v = (u32) (c) | (u32) (x);

	if (lp->options & OPT_SWAP_PORT)
		v = ((u32) (v) << 16) | ((u32) (v) >> 16);

	gsc_writel(v & 0xffff, (void*)(dev->base_addr + PA_CPU_PORT_L_ACCESS));
	udelay(1);
	gsc_writel(v >> 16, (void*)(dev->base_addr + PA_CPU_PORT_L_ACCESS));
}
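/* Observation on the two writes above: the CPU port register is 16 bits
 * wide, so the 32-bit port command leaves as two halfword writes, v & 0xffff
 * first, then v >> 16.  With OPT_SWAP_PORT set (the ASP case, see
 * asp_i82596_probe() below) the halves are pre-swapped, reversing the order
 * in which they reach the chip.
 */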
static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
{
	CHECK_INV(&(lp->iscp), sizeof(struct i596_iscp));
	while (--delcnt && lp->iscp.stat) {
		udelay(10);
		CHECK_INV(&(lp->iscp), sizeof(struct i596_iscp));
	}
	if (!delcnt) {
		printk("%s: %s, iscp.stat %04lx, didn't clear\n",
		     dev->name, str, lp->iscp.stat);
		return -1;
	}
	else
		return 0;
}


static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
{
	CHECK_INV(&(lp->scb), sizeof(struct i596_scb));
	while (--delcnt && lp->scb.command) {
		udelay(10);
		CHECK_INV(&(lp->scb), sizeof(struct i596_scb));
	}
	if (!delcnt) {
		printk("%s: %s, status %4.4x, cmd %4.4x.\n",
		     dev->name, str, lp->scb.status, lp->scb.command);
		return -1;
	}
	else
		return 0;
}


static void i596_display_data(struct net_device *dev)
{
	struct i596_private *lp = (struct i596_private *) dev->priv;
	struct i596_cmd *cmd;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	printk("lp and scp at %p, .sysbus = %08lx, .iscp = %p\n",
	       &lp->scp, lp->scp.sysbus, lp->scp.iscp);
	printk("iscp at %p, iscp.stat = %08lx, .scb = %p\n",
	       &lp->iscp, lp->iscp.stat, lp->iscp.scb);
	printk("scb at %p, scb.status = %04x, .command = %04x,"
	       " .cmd = %p, .rfd = %p\n",
	       &lp->scb, lp->scb.status, lp->scb.command,
	       lp->scb.cmd, lp->scb.rfd);
	printk("   errors: crc %lx, align %lx, resource %lx,"
	       " over %lx, rcvdt %lx, short %lx\n",
	       lp->scb.crc_err, lp->scb.align_err, lp->scb.resource_err,
	       lp->scb.over_err, lp->scb.rcvdt_err, lp->scb.short_err);
	cmd = lp->cmd_head;
	while (cmd != I596_NULL) {
		printk("cmd at %p, .status = %04x, .command = %04x, .b_next = %p\n",
		       cmd, cmd->status, cmd->command, cmd->b_next);
		cmd = cmd->v_next;
	}
	rfd = lp->rfd_head;
	printk("rfd_head = %p\n", rfd);
	do {
		printk("   %p .stat %04x, .cmd %04x, b_next %p, rbd %p,"
		       " count %04x\n",
		       rfd, rfd->stat, rfd->cmd, rfd->b_next, rfd->rbd,
		       rfd->count);
		rfd = rfd->v_next;
	} while (rfd != lp->rfd_head);
	rbd = lp->rbd_head;
	printk("rbd_head = %p\n", rbd);
	do {
		printk("   %p .count %04x, b_next %p, b_data %p, size %04x\n",
		       rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size);
		rbd = rbd->v_next;
	} while (rbd != lp->rbd_head);
	CHECK_INV(lp, sizeof(struct i596_private));
}


#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
static void i596_error(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

	pcc2[0x28] = 1;
	pcc2[0x2b] = 0x1d;
	printk("%s: Error interrupt\n", dev->name);
	i596_display_data(dev);
}
#endif
#define virt_to_dma(lp,v) ((char *)(v)-(char *)(lp)+(char *)((lp)->dma_addr))
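/* How virt_to_dma() is used (a sketch): every structure the chip sees lives
 * inside the single i596_private block, so its bus address is lp->dma_addr
 * plus the object's offset from lp.  The result is normally wrapped in a
 * WSWAP macro before being handed to the chip, e.g.
 *
 *	lp->scb.rfd = WSWAPrfd(virt_to_dma(lp, lp->rfds));
 */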
static inline void init_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = (struct i596_private *)dev->priv;
	int i;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	/* First build the Receive Buffer Descriptor List */

	for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
		dma_addr_t dma_addr;
		struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ + 4);

		if (skb == NULL)
			panic("82596: alloc_skb() failed");
		skb_reserve(skb, 2);
		dma_addr = pci_map_single(NULL, skb->tail, PKT_BUF_SZ,
					  PCI_DMA_FROMDEVICE);
		skb->dev = dev;
		rbd->v_next = rbd+1;
		rbd->b_next = WSWAPrbd(virt_to_dma(lp,rbd+1));
		rbd->b_addr = WSWAPrbd(virt_to_dma(lp,rbd));
		rbd->skb = skb;
		rbd->v_data = skb->tail;
		rbd->b_data = WSWAPchar(dma_addr);
		rbd->size = PKT_BUF_SZ;
	}
	lp->rbd_head = lp->rbds;
	rbd = lp->rbds + rx_ring_size - 1;
	rbd->v_next = lp->rbds;
	rbd->b_next = WSWAPrbd(virt_to_dma(lp,lp->rbds));

	/* Now build the Receive Frame Descriptor List */

	for (i = 0, rfd = lp->rfds; i < rx_ring_size; i++, rfd++) {
		rfd->rbd = I596_NULL;
		rfd->v_next = rfd+1;
		rfd->v_prev = rfd-1;
		rfd->b_next = WSWAPrfd(virt_to_dma(lp,rfd+1));
		rfd->cmd = CMD_FLEX;
	}
	lp->rfd_head = lp->rfds;
	lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
	rfd = lp->rfds;
	rfd->rbd = lp->rbd_head;
	rfd->v_prev = lp->rfds + rx_ring_size - 1;
	rfd = lp->rfds + rx_ring_size - 1;
	rfd->v_next = lp->rfds;
	rfd->b_next = WSWAPrfd(virt_to_dma(lp,lp->rfds));
	rfd->cmd = CMD_EOL|CMD_FLEX;

	CHECK_WBACK_INV(lp, sizeof(struct i596_private));
}
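/* Ring layout produced by init_rx_bufs() (illustrative): rfds[] and rbds[]
 * each form a circle, linked twice over -- v_next (plus v_prev on the RFDs)
 * for the CPU and b_next (bus addresses) for the chip.  The last RFD carries
 * CMD_EOL so the chip stops there, and initially only the first RFD owns the
 * RBD chain.
 */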
static inline void remove_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = (struct i596_private *)dev->priv;
	struct i596_rbd *rbd;
	int i;

	for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
		if (rbd->skb == NULL)
			break;
		pci_unmap_single(NULL,(dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
		dev_kfree_skb(rbd->skb);
	}
}


static void rebuild_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = (struct i596_private *) dev->priv;
	int i;

	/* Ensure rx frame/buffer descriptors are tidy */

	for (i = 0; i < rx_ring_size; i++) {
		lp->rfds[i].rbd = I596_NULL;
		lp->rfds[i].cmd = CMD_FLEX;
	}
	lp->rfds[rx_ring_size-1].cmd = CMD_EOL|CMD_FLEX;
	lp->rfd_head = lp->rfds;
	lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
	lp->rbd_head = lp->rbds;
	lp->rfds[0].rbd = WSWAPrbd(virt_to_dma(lp,lp->rbds));

	CHECK_WBACK_INV(lp, sizeof(struct i596_private));
}


static int init_i596_mem(struct net_device *dev)
{
	struct i596_private *lp = (struct i596_private *) dev->priv;
	unsigned long flags;

	disable_irq(dev->irq);	/* disable IRQs from LAN */
	DEB(DEB_INIT,
		printk("RESET 82596 port: %08lX (with IRQ%d disabled)\n",
		       dev->base_addr + PA_I82596_RESET,
		       dev->irq));

	gsc_writel(0, (void*)(dev->base_addr + PA_I82596_RESET));	/* Hard Reset */
	udelay(100);		/* Wait 100us - seems to help */

	/* change the scp address */

	lp->last_cmd = jiffies;

	lp->scp.sysbus = 0x0000006c;
	lp->scp.iscp = WSWAPiscp(virt_to_dma(lp,&(lp->iscp)));
	lp->iscp.scb = WSWAPscb(virt_to_dma(lp,&(lp->scb)));
	lp->iscp.stat = ISCP_BUSY;
	lp->cmd_backlog = 0;

	lp->cmd_head = lp->scb.cmd = I596_NULL;

	DEB(DEB_INIT,printk("%s: starting i82596.\n", dev->name));

	CHECK_WBACK(&(lp->scp), sizeof(struct i596_scp));
	CHECK_WBACK(&(lp->iscp), sizeof(struct i596_iscp));

	MPU_PORT(dev, PORT_ALTSCP, (void *)virt_to_dma(lp,&lp->scp));

	CA(dev);

	if (wait_istat(dev,lp,1000,"initialization timed out"))
		goto failed;
	DEB(DEB_INIT,printk("%s: i82596 initialization successful\n", dev->name));

	/* Ensure rx frame/buffer descriptors are tidy */
	rebuild_rx_bufs(dev);

	lp->scb.command = 0;
	CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));

	enable_irq(dev->irq);	/* enable IRQs from LAN */

	DEB(DEB_INIT,printk("%s: queuing CmdConfigure\n", dev->name));
	memcpy(lp->cf_cmd.i596_config, init_setup, 14);
	lp->cf_cmd.cmd.command = CmdConfigure;
	CHECK_WBACK(&(lp->cf_cmd), sizeof(struct cf_cmd));
	i596_add_cmd(dev, &lp->cf_cmd.cmd);

	DEB(DEB_INIT,printk("%s: queuing CmdSASetup\n", dev->name));
	memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
	lp->sa_cmd.cmd.command = CmdSASetup;
	CHECK_WBACK(&(lp->sa_cmd), sizeof(struct sa_cmd));
	i596_add_cmd(dev, &lp->sa_cmd.cmd);

	DEB(DEB_INIT,printk("%s: queuing CmdTDR\n", dev->name));
	lp->tdr_cmd.cmd.command = CmdTDR;
	CHECK_WBACK(&(lp->tdr_cmd), sizeof(struct tdr_cmd));
	i596_add_cmd(dev, &lp->tdr_cmd.cmd);

	spin_lock_irqsave (&lp->lock, flags);

	if (wait_cmd(dev,lp,1000,"timed out waiting to issue RX_START")) {
		spin_unlock_irqrestore (&lp->lock, flags);
		goto failed;
	}
	DEB(DEB_INIT,printk("%s: Issuing RX_START\n", dev->name));
	lp->scb.command = RX_START;
	lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
	CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));

	CA(dev);

	spin_unlock_irqrestore (&lp->lock, flags);

	if (wait_cmd(dev,lp,1000,"RX_START not processed"))
		goto failed;
	DEB(DEB_INIT,printk("%s: Receive unit started OK\n", dev->name));

	return 0;

failed:
	printk("%s: Failed to initialise 82596\n", dev->name);
	MPU_PORT(dev, PORT_RESET, 0);
	return -1;
}
static inline int i596_rx(struct net_device *dev)
{
	struct i596_private *lp = (struct i596_private *)dev->priv;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;
	int frames = 0;

	DEB(DEB_RXFRAME,printk ("i596_rx(), rfd_head %p, rbd_head %p\n",
			lp->rfd_head, lp->rbd_head));

	rfd = lp->rfd_head;		/* Ref next frame to check */

	CHECK_INV(rfd, sizeof(struct i596_rfd));
	while ((rfd->stat) & STAT_C) {	/* Loop while complete frames */
		if (rfd->rbd == I596_NULL)
			rbd = I596_NULL;
		else if (rfd->rbd == lp->rbd_head->b_addr) {
			rbd = lp->rbd_head;
			CHECK_INV(rbd, sizeof(struct i596_rbd));
		}
		else {
			printk("%s: rbd chain broken!\n", dev->name);
			/* XXX Now what? */
			rbd = I596_NULL;
		}
		DEB(DEB_RXFRAME, printk("  rfd %p, rfd.rbd %p, rfd.stat %04x\n",
			rfd, rfd->rbd, rfd->stat));

		if (rbd != I596_NULL && ((rfd->stat) & STAT_OK)) {
			/* a good frame */
			int pkt_len = rbd->count & 0x3fff;
			struct sk_buff *skb = rbd->skb;
			int rx_in_place = 0;

			DEB(DEB_RXADDR,print_eth(rbd->v_data, "received"));
			frames++;

			/* Check if the packet is long enough to just accept
			 * without copying to a properly sized skbuff.
			 */

			if (pkt_len > rx_copybreak) {
				struct sk_buff *newskb;
				dma_addr_t dma_addr;

				pci_unmap_single(NULL,(dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
				/* Get fresh skbuff to replace filled one. */
				newskb = dev_alloc_skb(PKT_BUF_SZ + 4);
				if (newskb == NULL) {
					skb = NULL;	/* drop pkt */
					goto memory_squeeze;
				}
				skb_reserve(newskb, 2);

				/* Pass up the skb already on the Rx ring. */
				skb_put(skb, pkt_len);
				rx_in_place = 1;
				rbd->skb = newskb;
				newskb->dev = dev;
				dma_addr = pci_map_single(NULL, newskb->tail, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
				rbd->v_data = newskb->tail;
				rbd->b_data = WSWAPchar(dma_addr);
				CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd));
			}
			else
				skb = dev_alloc_skb(pkt_len + 2);
memory_squeeze:
			if (skb == NULL) {
				/* XXX tulip.c can defer packets here!! */
				printk ("%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
				lp->stats.rx_dropped++;
			}
			else {
				skb->dev = dev;
				if (!rx_in_place) {
					/* 16 byte align the data fields */
					pci_dma_sync_single(NULL, (dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
					skb_reserve(skb, 2);
					memcpy(skb_put(skb,pkt_len), rbd->v_data, pkt_len);
				}
				skb->len = pkt_len;
				skb->protocol=eth_type_trans(skb,dev);
				netif_rx(skb);
				lp->stats.rx_packets++;
				lp->stats.rx_bytes+=pkt_len;
			}
		}
		else {
			DEB(DEB_ERRORS, printk("%s: Error, rfd.stat = 0x%04x\n",
					dev->name, rfd->stat));
			lp->stats.rx_errors++;
			if ((rfd->stat) & 0x0001)
				lp->stats.collisions++;
			if ((rfd->stat) & 0x0080)
				lp->stats.rx_length_errors++;
			if ((rfd->stat) & 0x0100)
				lp->stats.rx_over_errors++;
			if ((rfd->stat) & 0x0200)
				lp->stats.rx_fifo_errors++;
			if ((rfd->stat) & 0x0400)
				lp->stats.rx_frame_errors++;
			if ((rfd->stat) & 0x0800)
				lp->stats.rx_crc_errors++;
			if ((rfd->stat) & 0x1000)
				lp->stats.rx_length_errors++;
		}

		/* Clear the buffer descriptor count and EOF + F flags */

		if (rbd != I596_NULL && (rbd->count & 0x4000)) {
			rbd->count = 0;
			lp->rbd_head = rbd->v_next;
			CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd));
		}

		/* Tidy the frame descriptor, marking it as end of list */

		rfd->rbd = I596_NULL;
		rfd->stat = 0;
		rfd->cmd = CMD_EOL|CMD_FLEX;
		rfd->count = 0;

		/* Remove end-of-list from old end descriptor */

		rfd->v_prev->cmd = CMD_FLEX;

		/* Update record of next frame descriptor to process */

		lp->scb.rfd = rfd->b_next;
		lp->rfd_head = rfd->v_next;
		CHECK_WBACK_INV(rfd->v_prev, sizeof(struct i596_rfd));
		CHECK_WBACK_INV(rfd, sizeof(struct i596_rfd));
		rfd = lp->rfd_head;
		CHECK_INV(rfd, sizeof(struct i596_rfd));
	}

	DEB(DEB_RXFRAME,printk ("frames %d\n", frames));

	return 0;
}
static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
{
	struct i596_cmd *ptr;

	while (lp->cmd_head != I596_NULL) {
		ptr = lp->cmd_head;
		lp->cmd_head = ptr->v_next;
		lp->cmd_backlog--;

		switch ((ptr->command) & 0x7) {
		case CmdTx:
			{
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;
				pci_unmap_single(NULL, tx_cmd->dma_addr, skb->len, PCI_DMA_TODEVICE);

				dev_kfree_skb(skb);

				lp->stats.tx_errors++;
				lp->stats.tx_aborted_errors++;

				ptr->v_next = ptr->b_next = I596_NULL;
				tx_cmd->cmd.command = 0;  /* Mark as free */
				break;
			}
		default:
			ptr->v_next = ptr->b_next = I596_NULL;
		}
		CHECK_WBACK_INV(ptr, sizeof(struct i596_cmd));
	}

	wait_cmd(dev,lp,100,"i596_cleanup_cmd timed out");
	lp->scb.cmd = I596_NULL;
	CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
}


static inline void i596_reset(struct net_device *dev, struct i596_private *lp, int ioaddr)
{
	unsigned long flags;

	DEB(DEB_RESET,printk("i596_reset\n"));

	spin_lock_irqsave (&lp->lock, flags);

	wait_cmd(dev,lp,100,"i596_reset timed out");

	netif_stop_queue(dev);

	/* FIXME: this command might cause an lpmc */
	lp->scb.command = CUC_ABORT | RX_ABORT;
	CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
	CA(dev);

	/* wait for shutdown */
	wait_cmd(dev,lp,1000,"i596_reset 2 timed out");
	spin_unlock_irqrestore (&lp->lock, flags);

	i596_cleanup_cmd(dev,lp);
	i596_rx(dev);

	netif_start_queue(dev);
	init_i596_mem(dev);
}


static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
{
	struct i596_private *lp = (struct i596_private *) dev->priv;
	int ioaddr = dev->base_addr;
	unsigned long flags;

	DEB(DEB_ADDCMD,printk("i596_add_cmd cmd_head %p\n", lp->cmd_head));

	cmd->status = 0;
	cmd->command |= (CMD_EOL | CMD_INTR);
	cmd->v_next = cmd->b_next = I596_NULL;
	CHECK_WBACK(cmd, sizeof(struct i596_cmd));

	spin_lock_irqsave (&lp->lock, flags);

	if (lp->cmd_head != I596_NULL) {
		lp->cmd_tail->v_next = cmd;
		lp->cmd_tail->b_next = WSWAPcmd(virt_to_dma(lp,&cmd->status));
		CHECK_WBACK(lp->cmd_tail, sizeof(struct i596_cmd));
	} else {
		lp->cmd_head = cmd;
		wait_cmd(dev,lp,100,"i596_add_cmd timed out");
		lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&cmd->status));
		lp->scb.command = CUC_START;
		CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
		CA(dev);
	}
	lp->cmd_tail = cmd;
	lp->cmd_backlog++;

	spin_unlock_irqrestore (&lp->lock, flags);

	if (lp->cmd_backlog > max_cmd_backlog) {
		unsigned long tickssofar = jiffies - lp->last_cmd;

		if (tickssofar < ticks_limit)
			return;

		printk("%s: command unit timed out, status resetting.\n", dev->name);
#if 1
		i596_reset(dev, lp, ioaddr);
#endif
	}
}
#if 0
/* this function makes a perfectly adequate probe...  but we have a
   device list */
static int i596_test(struct net_device *dev)
{
	struct i596_private *lp = (struct i596_private *) dev->priv;
	volatile int *tint;
	u32 data;

	tint = (volatile int *)(&(lp->scp));
	data = virt_to_dma(lp,tint);

	tint[1] = -1;
	CHECK_WBACK(tint,PAGE_SIZE);

	MPU_PORT(dev, 1, data);

	for(data = 1000000; data; data--) {
		CHECK_INV(tint,PAGE_SIZE);
		if(tint[1] != -1)
			break;
	}

	printk("i596_test result %d\n", tint[1]);

	return 0;
}
#endif
static int i596_open(struct net_device *dev)
{
	int res = 0;

	DEB(DEB_OPEN,printk("%s: i596_open() irq %d.\n", dev->name, dev->irq));

	if (request_irq(dev->irq, &i596_interrupt, 0, "i82596", dev)) {
		printk("%s: IRQ %d not free\n", dev->name, dev->irq);
		return -EAGAIN;
	}

	request_region(dev->base_addr, 12, dev->name);

	init_rx_bufs(dev);

	netif_start_queue(dev);

	MOD_INC_USE_COUNT;

	/* Initialize the 82596 memory */
	if (init_i596_mem(dev)) {
		res = -EAGAIN;
		free_irq(dev->irq, dev);
	}

	return res;
}


static void i596_tx_timeout (struct net_device *dev)
{
	struct i596_private *lp = (struct i596_private *) dev->priv;
	int ioaddr = dev->base_addr;

	/* Transmitter timeout, serious problems. */
	DEB(DEB_ERRORS,printk("%s: transmit timed out, status resetting.\n",
			dev->name));

	lp->stats.tx_errors++;

	/* Try to restart the adaptor */
	if (lp->last_restart == lp->stats.tx_packets) {
		DEB(DEB_ERRORS,printk ("Resetting board.\n"));
		/* Shutdown and restart */
		i596_reset (dev, lp, ioaddr);
	} else {
		/* Issue a channel attention signal */
		DEB(DEB_ERRORS,printk ("Kicking board.\n"));
		lp->scb.command = CUC_START | RX_START;
		CHECK_WBACK_INV(&(lp->scb), sizeof(struct i596_scb));
		CA (dev);
		lp->last_restart = lp->stats.tx_packets;
	}

	dev->trans_start = jiffies;
	netif_wake_queue (dev);
}


static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct i596_private *lp = (struct i596_private *) dev->priv;
	struct tx_cmd *tx_cmd;
	struct i596_tbd *tbd;
	short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
	dev->trans_start = jiffies;

	DEB(DEB_STARTTX,printk("%s: i596_start_xmit(%x,%x) called\n", dev->name,
				skb->len, (unsigned int)skb->data));

	netif_stop_queue(dev);

	tx_cmd = lp->tx_cmds + lp->next_tx_cmd;
	tbd = lp->tbds + lp->next_tx_cmd;

	if (tx_cmd->cmd.command) {
		DEB(DEB_ERRORS,printk ("%s: xmit ring full, dropping packet.\n",
				dev->name));
		lp->stats.tx_dropped++;

		dev_kfree_skb(skb);
	} else {
		if (++lp->next_tx_cmd == TX_RING_SIZE)
			lp->next_tx_cmd = 0;
		tx_cmd->tbd = WSWAPtbd(virt_to_dma(lp,tbd));
		tbd->next = I596_NULL;

		tx_cmd->cmd.command = CMD_FLEX | CmdTx;
		tx_cmd->skb = skb;

		tx_cmd->pad = 0;
		tx_cmd->size = 0;
		tbd->pad = 0;
		tbd->size = EOF | length;

		tx_cmd->dma_addr = pci_map_single(NULL, skb->data, skb->len,
				PCI_DMA_TODEVICE);
		tbd->data = WSWAPchar(tx_cmd->dma_addr);

		DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued"));
		CHECK_WBACK_INV(tx_cmd, sizeof(struct tx_cmd));
		CHECK_WBACK_INV(tbd, sizeof(struct i596_tbd));
		i596_add_cmd(dev, &tx_cmd->cmd);

		lp->stats.tx_packets++;
		lp->stats.tx_bytes += length;
	}

	netif_start_queue(dev);

	return 0;
}
static void print_eth(unsigned char *add, char *str)
{
	int i;

	printk("i596 0x%p, ", add);
	for (i = 0; i < 6; i++)
		printk(" %02X", add[i + 6]);
	printk(" -->");
	for (i = 0; i < 6; i++)
		printk(" %02X", add[i]);
	printk(" %02X%02X, %s\n", add[12], add[13], str);
}


#define LAN_PROM_ADDR	0xF0810000
static int __init i82596_probe(struct net_device *dev, int options)
{
	int i;
	struct i596_private *lp;
	char eth_addr[6];
	dma_addr_t dma_addr;

	/* This lot is to ensure things have been cache line aligned. */
	if (sizeof(struct i596_rfd) != 32) {
		printk("82596: sizeof(struct i596_rfd) = %d\n",
			sizeof(struct i596_rfd));
		return -ENODEV;
	}
	if (sizeof(struct i596_rbd) != 32) {
		printk("82596: sizeof(struct i596_rbd) = %d\n",
			sizeof(struct i596_rbd));
		return -ENODEV;
	}
	if (sizeof(struct tx_cmd) != 32) {
		printk("82596: sizeof(struct tx_cmd) = %d\n",
			sizeof(struct tx_cmd));
		return -ENODEV;
	}
	if (sizeof(struct i596_tbd) != 32) {
		printk("82596: sizeof(struct i596_tbd) = %d\n",
			sizeof(struct i596_tbd));
		return -ENODEV;
	}
	if (sizeof(struct i596_private) > 4096) {
		printk("82596: sizeof(struct i596_private) = %d\n",
			sizeof(struct i596_private));
		return -ENODEV;
	}

	/* FIXME:
	    Currently this only works if set up from lasi.c.
	    This should be changed to use probing too!
	 */

	if (!dev->base_addr || !dev->irq)
		return -ENODEV;

	if (!pdc_lan_station_id( (char*)&eth_addr, (void*)dev->base_addr)) {
		for(i=0;i<6;i++)
			eth_addr[i] = gsc_readb(LAN_PROM_ADDR+i);
		printk("82596.c: MAC of HP700 LAN blindly read from the prom!\n");
	}

	dev->mem_start = (int)pci_alloc_consistent( NULL,
		sizeof(struct i596_private), &dma_addr);
	if (!dev->mem_start) {
		printk("%s: Couldn't get consistent shared memory\n", dev->name);
		dma_consistent = 0;
		dev->mem_start = (int)__get_free_pages(GFP_ATOMIC, 0);
		if (!dev->mem_start) {
			printk("%s: Couldn't get shared memory\n", dev->name);
#ifdef ENABLE_APRICOT
			release_region(dev->base_addr, I596_TOTAL_SIZE);
#endif
			return -ENOMEM;
		}
		dma_addr = virt_to_bus(dev->mem_start);
	}
	ether_setup(dev);
	DEB(DEB_PROBE,printk("%s: 82596 at %#3lx,", dev->name, dev->base_addr));

	for (i = 0; i < 6; i++)
		DEB(DEB_PROBE,printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]));

	DEB(DEB_PROBE,printk(" IRQ %d.\n", dev->irq));

	DEB(DEB_PROBE,printk(version));

	/* The 82596-specific entries in the device structure. */
	dev->open = i596_open;
	dev->stop = i596_close;
	dev->hard_start_xmit = i596_start_xmit;
	dev->get_stats = i596_get_stats;
	dev->set_multicast_list = set_multicast_list;
	dev->tx_timeout = i596_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;

	dev->priv = (void *)(dev->mem_start);

	lp = (struct i596_private *) dev->priv;
	DEB(DEB_INIT,printk ("%s: lp at 0x%08lx (%d bytes), lp->scb at 0x%08lx\n",
			dev->name, (unsigned long)lp,
			sizeof(struct i596_private), (unsigned long)&lp->scb));
	memset((void *) lp, 0, sizeof(struct i596_private));

#if 0
	kernel_set_cachemode((void *)(dev->mem_start), 4096, IOMAP_NOCACHE_SER);
#endif
	lp->options = options;
	lp->scb.command = 0;
	lp->scb.cmd = I596_NULL;
	lp->scb.rfd = I596_NULL;
	lp->lock = SPIN_LOCK_UNLOCKED;
	lp->dma_addr = dma_addr;

	CHECK_WBACK_INV(dev->mem_start, sizeof(struct i596_private));

	return 0;
}


int __init lasi_i82596_probe(struct net_device *dev)
{
	return i82596_probe(dev, 0);
}


int __init asp_i82596_probe(struct net_device *dev)
{
	return i82596_probe(dev, OPT_SWAP_PORT);
}
static void i596_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct i596_private *lp;
	unsigned short status, ack_cmd = 0;

	if (dev == NULL) {
		printk("i596_interrupt(): irq %d for unknown device.\n", irq);
		return;
	}

	lp = (struct i596_private *) dev->priv;

	spin_lock (&lp->lock);

	wait_cmd(dev,lp,100,"i596 interrupt, timeout");
	status = lp->scb.status;

	DEB(DEB_INTS,printk("%s: i596 interrupt, IRQ %d, status %4.4x.\n",
			dev->name, irq, status));

	ack_cmd = status & 0xf000;

	if (!ack_cmd) {
		DEB(DEB_ERRORS, printk("%s: interrupt with no events\n", dev->name));
		spin_unlock (&lp->lock);
		return;
	}

	if ((status & 0x8000) || (status & 0x2000)) {
		struct i596_cmd *ptr;

		if ((status & 0x8000))
			DEB(DEB_INTS,printk("%s: i596 interrupt completed command.\n", dev->name));
		if ((status & 0x2000))
			DEB(DEB_INTS,printk("%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));

		while (lp->cmd_head != I596_NULL) {
			CHECK_INV(lp->cmd_head, sizeof(struct i596_cmd));
			if (!(lp->cmd_head->status & STAT_C))
				break;

			ptr = lp->cmd_head;

			DEB(DEB_STATUS,printk("cmd_head->status = %04x, ->command = %04x\n",
				       lp->cmd_head->status, lp->cmd_head->command));
			lp->cmd_head = ptr->v_next;
			lp->cmd_backlog--;

			switch ((ptr->command) & 0x7) {
			case CmdTx:
				{
					struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
					struct sk_buff *skb = tx_cmd->skb;

					if ((ptr->status) & STAT_OK) {
						DEB(DEB_TXADDR,print_eth(skb->data, "tx-done"));
					} else {
						lp->stats.tx_errors++;
						if ((ptr->status) & 0x0020)
							lp->stats.collisions++;
						if (!((ptr->status) & 0x0040))
							lp->stats.tx_heartbeat_errors++;
						if ((ptr->status) & 0x0400)
							lp->stats.tx_carrier_errors++;
						if ((ptr->status) & 0x0800)
							lp->stats.collisions++;
						if ((ptr->status) & 0x1000)
							lp->stats.tx_aborted_errors++;
					}
					pci_unmap_single(NULL, tx_cmd->dma_addr, skb->len, PCI_DMA_TODEVICE);
					dev_kfree_skb_irq(skb);

					tx_cmd->cmd.command = 0; /* Mark free */
					break;
				}
			case CmdTDR:
				{
					unsigned short status = ((struct tdr_cmd *)ptr)->status;

					if (status & 0x8000) {
						DEB(DEB_ANY,printk("%s: link ok.\n", dev->name));
					} else {
						if (status & 0x4000)
							printk("%s: Transceiver problem.\n", dev->name);
						if (status & 0x2000)
							printk("%s: Termination problem.\n", dev->name);
						if (status & 0x1000)
							printk("%s: Short circuit.\n", dev->name);

						DEB(DEB_TDR,printk("%s: Time %d.\n", dev->name, status & 0x07ff));
					}
					break;
				}
			case CmdConfigure:
				/* Zap command so set_multicast_list() knows it is free */
				ptr->command = 0;
				break;
			}
			ptr->v_next = ptr->b_next = I596_NULL;
			CHECK_WBACK(ptr, sizeof(struct i596_cmd));
			lp->last_cmd = jiffies;
		}

		/* This mess is arranging that only the last of any outstanding
		 * commands has the interrupt bit set.  Should probably really
		 * only add to the cmd queue when the CU is stopped.
		 */
		ptr = lp->cmd_head;
		while ((ptr != I596_NULL) && (ptr != lp->cmd_tail)) {
			struct i596_cmd *prev = ptr;

			ptr->command &= 0x1fff;
			ptr = ptr->v_next;
			CHECK_WBACK_INV(prev, sizeof(struct i596_cmd));
		}

		if ((lp->cmd_head != I596_NULL))
			ack_cmd |= CUC_START;
		lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&lp->cmd_head->status));
		CHECK_WBACK_INV(&lp->scb, sizeof(struct i596_scb));
	}
	if ((status & 0x1000) || (status & 0x4000)) {
		if ((status & 0x4000))
			DEB(DEB_INTS,printk("%s: i596 interrupt received a frame.\n", dev->name));
		i596_rx(dev);
		/* Only RX_START if stopped - RGH 07-07-96 */
		if (status & 0x1000) {
			if (netif_running(dev)) {
				DEB(DEB_ERRORS,printk("%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status));
				ack_cmd |= RX_START;
				lp->stats.rx_errors++;
				lp->stats.rx_fifo_errors++;
				rebuild_rx_bufs(dev);
			}
		}
	}
	wait_cmd(dev,lp,100,"i596 interrupt, timeout");
	lp->scb.command = ack_cmd;
	CHECK_WBACK(&lp->scb, sizeof(struct i596_scb));

	/* DANGER: I suspect that some kind of interrupt
	   acknowledgement aside from acking the 82596 might be needed
	   here...  but it's running acceptably without */

	CA(dev);

	wait_cmd(dev,lp,100,"i596 interrupt, exit timeout");
	DEB(DEB_INTS,printk("%s: exiting interrupt.\n", dev->name));

	spin_unlock (&lp->lock);
	return;
}
static int i596_close(struct net_device *dev)
{
	struct i596_private *lp = (struct i596_private *) dev->priv;
	unsigned long flags;

	netif_stop_queue(dev);

	DEB(DEB_INIT,printk("%s: Shutting down ethercard, status was %4.4x.\n",
		       dev->name, lp->scb.status));

	save_flags(flags);
	cli();

	wait_cmd(dev,lp,100,"close1 timed out");
	lp->scb.command = CUC_ABORT | RX_ABORT;
	CHECK_WBACK(&lp->scb, sizeof(struct i596_scb));

	CA(dev);

	wait_cmd(dev,lp,100,"close2 timed out");
	restore_flags(flags);
	DEB(DEB_STRUCT,i596_display_data(dev));
	i596_cleanup_cmd(dev,lp);

	disable_irq(dev->irq);

	free_irq(dev->irq, dev);
	remove_rx_bufs(dev);

	release_region(dev->base_addr, 12);

	MOD_DEC_USE_COUNT;

	return 0;
}


static struct net_device_stats *
i596_get_stats(struct net_device *dev)
{
	struct i596_private *lp = (struct i596_private *) dev->priv;

	return &lp->stats;
}


/*
 *    Set or clear the multicast filter for this adaptor.
 */

static void set_multicast_list(struct net_device *dev)
{
	struct i596_private *lp = (struct i596_private *) dev->priv;
	int config = 0, cnt;

	DEB(DEB_MULTI,printk("%s: set multicast list, %d entries, promisc %s, allmulti %s\n", dev->name, dev->mc_count, dev->flags & IFF_PROMISC ? "ON" : "OFF", dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));

	if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) {
		lp->cf_cmd.i596_config[8] |= 0x01;
		config = 1;
	}
	if (!(dev->flags & IFF_PROMISC) && (lp->cf_cmd.i596_config[8] & 0x01)) {
		lp->cf_cmd.i596_config[8] &= ~0x01;
		config = 1;
	}
	if ((dev->flags & IFF_ALLMULTI) && (lp->cf_cmd.i596_config[11] & 0x20)) {
		lp->cf_cmd.i596_config[11] &= ~0x20;
		config = 1;
	}
	if (!(dev->flags & IFF_ALLMULTI) && !(lp->cf_cmd.i596_config[11] & 0x20)) {
		lp->cf_cmd.i596_config[11] |= 0x20;
		config = 1;
	}
	if (config) {
		if (lp->cf_cmd.cmd.command)
			printk("%s: config change request already queued\n",
			       dev->name);
		else {
			lp->cf_cmd.cmd.command = CmdConfigure;
			CHECK_WBACK_INV(&lp->cf_cmd, sizeof(struct cf_cmd));
			i596_add_cmd(dev, &lp->cf_cmd.cmd);
		}
	}

	cnt = dev->mc_count;
	if (cnt > MAX_MC_CNT) {
		cnt = MAX_MC_CNT;
		printk("%s: Only %d multicast addresses supported",
			dev->name, cnt);
	}

	if (dev->mc_count > 0) {
		struct dev_mc_list *dmi;
		unsigned char *cp;
		struct mc_cmd *cmd;

		cmd = &lp->mc_cmd;
		cmd->cmd.command = CmdMulticastList;
		cmd->mc_cnt = dev->mc_count * 6;
		cp = cmd->mc_addrs;
		for (dmi = dev->mc_list; cnt && dmi != NULL; dmi = dmi->next, cnt--, cp += 6) {
			memcpy(cp, dmi->dmi_addr, 6);
			if (i596_debug > 1)
				DEB(DEB_MULTI,printk("%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n",
						dev->name, cp[0],cp[1],cp[2],cp[3],cp[4],cp[5]));
		}
		CHECK_WBACK_INV(&lp->mc_cmd, sizeof(struct mc_cmd));
		i596_add_cmd(dev, &cmd->cmd);
	}
}
#ifdef HAVE_DEVLIST
static unsigned int i596_portlist[] __initdata =
{0x300, 0};
struct netdev_entry i596_drv =
{"lasi_i82596", lasi_i82596_probe, I596_TOTAL_SIZE, i596_portlist};
#endif

#ifdef MODULE
static char devicename[9] =
{0,};
static struct net_device dev_82596 =
{
	devicename,	/* device name inserted by drivers/net/net_init.c */
	0, 0, 0, 0,
	0, 0,		/* base, irq */
	0, 0, 0, NULL, lasi_i82596_probe};


MODULE_PARM(debug, "i");
static int debug = -1;

int init_module(void)
{
	if (debug >= 0)
		i596_debug = debug;
	if (register_netdev(&dev_82596) != 0)
		return -EIO;
	return 0;
}
void cleanup_module(void)
{
	struct i596_private *lp;

	unregister_netdev(&dev_82596);
	lp = (struct i596_private *) dev_82596.priv;

	if (dma_consistent)
		pci_free_consistent( NULL, sizeof( struct i596_private),
			(void *)dev_82596.mem_start, lp->dma_addr);
	else
		free_page ((u32)(dev_82596.mem_start));

	dev_82596.priv = NULL;
}

#endif				/* MODULE */