/* lasi_82596.c -- driver for the intel 82596 ethernet controller, as
   munged into HPPA boxen.

   This driver is based upon 82596.c, original credits are below...
   but there were too many hoops which HP wants jumped through to
   keep this code in there in a sane manner.

   3 primary sources of the mess --
   1) hppa needs *lots* of cacheline flushing to keep this kind of
   MMIO running.

   2) The 82596 needs to see all of its pointers as their physical
   address.  Thus virt_to_bus/bus_to_virt are *everywhere*.

   3) The implementation HP is using seems to be significantly pickier
   about when and how the command and RX units are started.  Some
   command ordering was changed.

   Examination of the mach driver leads one to believe that there
   might be a saner way to pull this off...  anyone who feels like a
   full rewrite can be my guest.

   Split 02/13/2000 Sam Creasey (sammy@oh.verio.com)

   02/01/2000  Initial modifications for parisc by Helge Deller (deller@gmx.de)
   03/02/2000  changes for better/correct(?) cache-flushing (deller)
*/
/* 82596.c: A generic 82596 ethernet driver for linux. */

/*
   Written 1994 by Mark Evans.
   This driver is for the Apricot 82596 bus-master interface

   Modularised 12/94 Mark Evans

   Modified to support the 82596 ethernet chips on 680x0 VME boards.
   by Richard Hirst <richard@sleepie.demon.co.uk>

   980825:  Changed to receive directly into sk_buffs which are
   allocated at open() time.  Eliminates copy on incoming frames
   (small ones are still copied).  Shared data now held in a
   non-cached page, so we can run on 68060 in copyback mode.

   * look at deferring rx frames rather than discarding (as per tulip)
   * handle tx ring full as per tulip
   * performance test to tune rx_copybreak

   Most of my modifications relate to the braindead big-endian
   implementation by Intel.  When the i596 is operating in
   'big-endian' mode, it thinks a 32 bit value of 0x12345678
   should be stored as 0x56781234.  This is a real pain, when
   you have linked lists which are shared by the 680x0 and the
   i596.

   Written 1993 by Donald Becker.
   Copyright 1993 United States Government as represented by the Director,
   National Security Agency.  This software may only be used and distributed
   according to the terms of the GNU General Public License as modified by SRC,
   incorporated herein by reference.

   The author may be reached as becker@scyld.com, or C/O
   Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/types.h>

#include <asm/bitops.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>

#include <asm/cache.h>
#include <asm/parisc-device.h>
static char version[] __devinitdata =
	"82596.c $Revision: 1.29 $\n";
#define DEB_INIT	0x0001
#define DEB_PROBE	0x0002
#define DEB_SERIOUS	0x0004
#define DEB_ERRORS	0x0008
#define DEB_MULTI	0x0010
#define DEB_TDR		0x0020
#define DEB_OPEN	0x0040
#define DEB_RESET	0x0080
#define DEB_ADDCMD	0x0100
#define DEB_STATUS	0x0200
#define DEB_STARTTX	0x0400
#define DEB_RXADDR	0x0800
#define DEB_TXADDR	0x1000
#define DEB_RXFRAME	0x2000
#define DEB_INTS	0x4000
#define DEB_STRUCT	0x8000
#define DEB_ANY		0xffff

#define DEB(x,y)	if (i596_debug & (x)) { y; }
#define CHECK_WBACK(addr,len) \
	do { dma_cache_sync((void *)addr, len, DMA_TO_DEVICE); } while (0)

#define CHECK_INV(addr,len) \
	do { dma_cache_sync((void *)addr, len, DMA_FROM_DEVICE); } while (0)

#define CHECK_WBACK_INV(addr,len) \
	do { dma_cache_sync((void *)addr, len, DMA_BIDIRECTIONAL); } while (0)
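
/* Descriptive note on the three helpers above: CHECK_WBACK() writes the
 * CPU's dirty cache lines back before the 82596 reads a structure we have
 * just filled in, CHECK_INV() discards stale lines before the CPU reads a
 * structure the chip has written, and CHECK_WBACK_INV() does both for
 * structures that travel in both directions (rfds, rbds, tx commands).
 */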
#define PA_I82596_RESET		0	/* Offsets relative to LASI-LAN-Addr. */
#define PA_CPU_PORT_L_ACCESS	4
#define PA_CHANNEL_ATTENTION	8
/*
 * Define various macros for Channel Attention, word swapping etc., dependent
 * on architecture.  MVME and BVME are 680x0 based, otherwise it is Intel.
 */

#define WSWAPrfd(x)	(((u32)(x)<<16) | ((((u32)(x)))>>16))
#define WSWAPrbd(x)	(((u32)(x)<<16) | ((((u32)(x)))>>16))
#define WSWAPiscp(x)	(((u32)(x)<<16) | ((((u32)(x)))>>16))
#define WSWAPscb(x)	(((u32)(x)<<16) | ((((u32)(x)))>>16))
#define WSWAPcmd(x)	(((u32)(x)<<16) | ((((u32)(x)))>>16))
#define WSWAPtbd(x)	(((u32)(x)<<16) | ((((u32)(x)))>>16))
#define WSWAPchar(x)	(((u32)(x)<<16) | ((((u32)(x)))>>16))
#define ISCP_BUSY	0x00010000
#define MACH_IS_APRICOT	0

#define WSWAPrfd(x)	((struct i596_rfd *)(x))
#define WSWAPrbd(x)	((struct i596_rbd *)(x))
#define WSWAPiscp(x)	((struct i596_iscp *)(x))
#define WSWAPscb(x)	((struct i596_scb *)(x))
#define WSWAPcmd(x)	((struct i596_cmd *)(x))
#define WSWAPtbd(x)	((struct i596_tbd *)(x))
#define WSWAPchar(x)	((char *)(x))
#define ISCP_BUSY	0x0001
#define MACH_IS_APRICOT	1
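
/*
 * Minimal illustrative sketch (not part of the original driver): the
 * swapping variants above hand the big-endian 82596 a 32-bit bus address
 * with its two 16-bit halves exchanged, exactly as described in the header
 * comment (0x12345678 is presented as 0x56781234).  Spelled out long-hand:
 */
static inline u32 wswap_sketch(u32 bus_addr)
{
	u32 hi = bus_addr >> 16;	/* upper halfword */
	u32 lo = bus_addr & 0xffff;	/* lower halfword */

	return (lo << 16) | hi;		/* same result as WSWAPcmd(bus_addr) */
}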
/*
 * The MPU_PORT command allows direct access to the 82596.  With PORT access
 * the following commands are available (p5-18).  The 32-bit port command
 * must be word-swapped with the most significant word written first.
 * This only applies to VME boards.
 */
#define PORT_RESET		0x00	/* reset 82596 */
#define PORT_SELFTEST		0x01	/* selftest */
#define PORT_ALTSCP		0x02	/* alternate SCB address */
#define PORT_ALTDUMP		0x03	/* Alternate DUMP address */
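
/* Usage note (derived from the code below, not from the original comments):
 * a port command is formed by OR-ing one of the PORT_* codes into the low
 * bits of the 32-bit physical address argument, e.g. init_i596_mem() issues
 * MPU_PORT(dev, PORT_ALTSCP, virt_to_dma(lp, &lp->scp)) to point the chip at
 * the SCP, and MPU_PORT() then presents the combined word to the chip via
 * two writes to PA_CPU_PORT_L_ACCESS, with OPT_SWAP_PORT selecting how the
 * halves are arranged.
 */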
static int i596_debug = (DEB_SERIOUS|DEB_PROBE);

MODULE_AUTHOR("Richard Hirst");
MODULE_DESCRIPTION("i82596 driver");
MODULE_LICENSE("GPL");
MODULE_PARM(i596_debug, "i");
MODULE_PARM_DESC(i596_debug, "lasi_82596 debug mask");
/* Copy frames shorter than rx_copybreak, otherwise pass on up in
 * a full sized sk_buff.  Value of 100 stolen from tulip.c (!alpha).
 */
static int rx_copybreak = 100;
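
/* The trade-off (see i596_rx() below): packets longer than rx_copybreak
 * keep the zero-copy path -- the sk_buff already mapped on the ring is
 * handed straight to the stack and a fresh buffer is mapped in its place --
 * while shorter packets are memcpy'd into a right-sized sk_buff so the
 * large ring buffer can be reused immediately.
 */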
#define MAX_DRIVERS	4	/* max count of drivers */

#define PKT_BUF_SZ	1536
#define MAX_MC_CNT	64

#define I596_NULL	((u32)0xffffffff)

#define CMD_EOL		0x8000	/* The last command of the list, stop. */
#define CMD_SUSP	0x4000	/* Suspend after doing cmd. */
#define CMD_INTR	0x2000	/* Interrupt after doing cmd. */

#define CMD_FLEX	0x0008	/* Enable flexible memory model */
enum commands {
	CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
	CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
};
#define STAT_C		0x8000	/* Set to 0 after execution */
#define STAT_B		0x4000	/* Command being executed */
#define STAT_OK		0x2000	/* Command executed ok */
#define STAT_A		0x1000	/* Command aborted */

#define CUC_START	0x0100
#define CUC_RESUME	0x0200
#define CUC_SUSPEND	0x0300
#define CUC_ABORT	0x0400
#define RX_START	0x0010
#define RX_RESUME	0x0020
#define RX_SUSPEND	0x0030
#define RX_ABORT	0x0040

#define OPT_SWAP_PORT	0x0001	/* Need to wordswp on the MPU port */

	unsigned short porthi;
	unsigned short portlo;

#define SIZE_MASK	0x3fff

	u32 cache_pad[5];		/* Total 32 bytes... */
/* The command structure has two 'next' pointers; v_next is the address of
 * the next command as seen by the CPU, b_next is the address of the next
 * command as seen by the 82596.  The b_next pointer, as used by the 82596
 * always references the status field of the next command, rather than the
 * v_next field, because the 82596 is unaware of v_next.  It may seem more
 * logical to put v_next at the end of the structure, but we cannot do that
 * because the 82596 expects other fields to be there, depending on command
 * type.
 */

	struct i596_cmd *v_next;	/* Address from CPUs viewpoint */
	unsigned short status;
	unsigned short command;
	dma_addr_t     b_next;		/* Address from i596 viewpoint */
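
/* Concretely (see i596_add_cmd() further down), appending a command keeps
 * both chains in step:
 *
 *	lp->cmd_tail->v_next = cmd;                                      (CPU-side list)
 *	lp->cmd_tail->b_next = WSWAPcmd(virt_to_dma(lp, &cmd->status));  (82596-side list)
 */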
	struct sk_buff *skb;		/* So we can free it after tx */
	u32 cache_pad[6];		/* Total 64 bytes... */
	u32 cache_pad[1];		/* Total 32 bytes... */

	unsigned short status;

	char mc_addrs[MAX_MC_CNT*6];

	char i596_config[16];

	dma_addr_t b_next;		/* Address from i596 viewpoint */
	unsigned short count;

	struct i596_rfd *v_next;	/* Address from CPUs viewpoint */
	struct i596_rfd *v_prev;

	u32 cache_pad[2];		/* Total 32 bytes... */

	unsigned short count;
	unsigned short zero1;

	dma_addr_t b_data;		/* Address from i596 viewpoint */

	unsigned short zero2;

	struct i596_rbd *v_next;
	dma_addr_t b_addr;		/* This rbd addr from i596 view */
	unsigned char *v_data;		/* Address from CPUs viewpoint */
					/* Total 32 bytes... */
/* These values are chosen so struct i596_private fits in one page... */

#define TX_RING_SIZE 32
#define RX_RING_SIZE 16

	unsigned short status;
	unsigned short command;

	unsigned short t_off;
struct i596_private {
	volatile struct i596_scp scp		__attribute__((aligned(32)));
	volatile struct i596_iscp iscp		__attribute__((aligned(32)));
	volatile struct i596_scb scb		__attribute__((aligned(32)));
	struct sa_cmd sa_cmd			__attribute__((aligned(32)));
	struct cf_cmd cf_cmd			__attribute__((aligned(32)));
	struct tdr_cmd tdr_cmd			__attribute__((aligned(32)));
	struct mc_cmd mc_cmd			__attribute__((aligned(32)));
	struct i596_rfd rfds[RX_RING_SIZE]	__attribute__((aligned(32)));
	struct i596_rbd rbds[RX_RING_SIZE]	__attribute__((aligned(32)));
	struct tx_cmd tx_cmds[TX_RING_SIZE]	__attribute__((aligned(32)));
	struct i596_tbd tbds[TX_RING_SIZE]	__attribute__((aligned(32)));

	struct i596_rfd *rfd_head;
	struct i596_rbd *rbd_head;
	struct i596_cmd *cmd_tail;
	struct i596_cmd *cmd_head;

	struct net_device_stats stats;
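
/* Every chip-visible member above is aligned to 32 bytes, and the
 * descriptor structures are padded to 32-byte multiples (the cache_pad
 * fields and the sizeof checks in i82596_probe() enforce this), so a
 * CHECK_WBACK()/CHECK_INV() on one object never flushes or invalidates
 * part of its neighbour's cache line.
 */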
static char init_setup[] =
{
	0x8E,		/* length, prefetch on */
	0xC8,		/* fifo to 8, monitor off */
	0x80,		/* don't save bad frames */
	0x2E,		/* No source address insertion, 8 byte preamble */
	0x00,		/* priority and backoff defaults */
	0x60,		/* interframe spacing */
	0x00,		/* slot time LSB */
	0xf2,		/* slot time and retries */
	0x00,		/* promiscuous mode */
	0x00,		/* collision detect */
	0x40,		/* minimum frame length */
	0x7f /*  *multi IA */ };
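
/* set_multicast_list() below edits two bytes of this block in place,
 * i596_config[8] (bit 0: promiscuous mode) and i596_config[11] (bit 5,
 * multicast handling), and then re-queues a CmdConfigure to apply the
 * change.
 */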
static int i596_open(struct net_device *dev);
static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t i596_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static int i596_close(struct net_device *dev);
static struct net_device_stats *i596_get_stats(struct net_device *dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
static void i596_tx_timeout (struct net_device *dev);
static void print_eth(unsigned char *buf, char *str);
static void set_multicast_list(struct net_device *dev);

static int rx_ring_size = RX_RING_SIZE;
static int ticks_limit = 100;
static int max_cmd_backlog = TX_RING_SIZE-1;
static inline void CA(struct net_device *dev)
	gsc_writel(0, dev->base_addr + PA_CHANNEL_ATTENTION);
static inline void MPU_PORT(struct net_device *dev, int c, dma_addr_t x)
	struct i596_private *lp = (struct i596_private *) dev->priv;

	u32 v = (u32) (c) | (u32) (x);

	if (lp->options & OPT_SWAP_PORT) {

	gsc_writel(a, dev->base_addr + PA_CPU_PORT_L_ACCESS);

	gsc_writel(b, dev->base_addr + PA_CPU_PORT_L_ACCESS);
static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
	CHECK_INV(&(lp->iscp), sizeof(struct i596_iscp));
	while (--delcnt && lp->iscp.stat) {
		CHECK_INV(&(lp->iscp), sizeof(struct i596_iscp));
	printk("%s: %s, iscp.stat %04x, didn't clear\n",
	       dev->name, str, lp->iscp.stat);

static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
	CHECK_INV(&(lp->scb), sizeof(struct i596_scb));
	while (--delcnt && lp->scb.command) {
		CHECK_INV(&(lp->scb), sizeof(struct i596_scb));
	printk("%s: %s, status %4.4x, cmd %4.4x.\n",
	       dev->name, str, lp->scb.status, lp->scb.command);
static void i596_display_data(struct net_device *dev)
	struct i596_private *lp = (struct i596_private *) dev->priv;
	struct i596_cmd *cmd;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	printk("lp and scp at %p, .sysbus = %08x, .iscp = %08x\n",
	       &lp->scp, lp->scp.sysbus, lp->scp.iscp);
	printk("iscp at %p, iscp.stat = %08x, .scb = %08x\n",
	       &lp->iscp, lp->iscp.stat, lp->iscp.scb);
	printk("scb at %p, scb.status = %04x, .command = %04x,"
	       " .cmd = %08x, .rfd = %08x\n",
	       &lp->scb, lp->scb.status, lp->scb.command,
	       lp->scb.cmd, lp->scb.rfd);
	printk(" errors: crc %x, align %x, resource %x,"
	       " over %x, rcvdt %x, short %x\n",
	       lp->scb.crc_err, lp->scb.align_err, lp->scb.resource_err,
	       lp->scb.over_err, lp->scb.rcvdt_err, lp->scb.short_err);
	while (cmd != NULL) {
		printk("cmd at %p, .status = %04x, .command = %04x, .b_next = %08x\n",
		       cmd, cmd->status, cmd->command, cmd->b_next);
	printk("rfd_head = %p\n", rfd);
		printk (" %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
			rfd, rfd->stat, rfd->cmd, rfd->b_next, rfd->rbd,
	} while (rfd != lp->rfd_head);
	printk("rbd_head = %p\n", rbd);
		printk(" %p .count %04x, b_next %08x, b_data %08x, size %04x\n",
		       rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size);
	} while (rbd != lp->rbd_head);
	CHECK_INV(lp, sizeof(struct i596_private));
#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
static void i596_error(int irq, void *dev_id, struct pt_regs *regs)
	struct net_device *dev = dev_id;
	volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

	printk("%s: Error interrupt\n", dev->name);
	i596_display_data(dev);
#define virt_to_dma(lp,v) ((lp)->dma_addr + (dma_addr_t)((unsigned long)(v)-(unsigned long)(lp)))
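
/* virt_to_dma() works because the whole i596_private block is allocated in
 * one piece with dma_alloc_noncoherent() in i82596_probe(), so the bus
 * address of any member is just lp->dma_addr plus that member's offset.
 * Typical use, once lp has been set up:
 *
 *	lp->iscp.scb = WSWAPscb(virt_to_dma(lp, &(lp->scb)));
 */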
static inline void init_rx_bufs(struct net_device *dev)
	struct i596_private *lp = (struct i596_private *)dev->priv;

	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	/* First build the Receive Buffer Descriptor List */

	for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
		struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ + 4);

			panic("82596: alloc_skb() failed");
		dma_addr = dma_map_single(lp->dev, skb->tail, PKT_BUF_SZ,
		rbd->b_next = WSWAPrbd(virt_to_dma(lp,rbd+1));
		rbd->b_addr = WSWAPrbd(virt_to_dma(lp,rbd));
		rbd->v_data = skb->tail;
		rbd->b_data = WSWAPchar(dma_addr);
		rbd->size = PKT_BUF_SZ;
	lp->rbd_head = lp->rbds;
	rbd = lp->rbds + rx_ring_size - 1;
	rbd->v_next = lp->rbds;
	rbd->b_next = WSWAPrbd(virt_to_dma(lp,lp->rbds));

	/* Now build the Receive Frame Descriptor List */

	for (i = 0, rfd = lp->rfds; i < rx_ring_size; i++, rfd++) {
		rfd->rbd = I596_NULL;
		rfd->b_next = WSWAPrfd(virt_to_dma(lp,rfd+1));
	lp->rfd_head = lp->rfds;
	lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
	rfd->rbd = WSWAPrbd(virt_to_dma(lp,lp->rbd_head));
	rfd->v_prev = lp->rfds + rx_ring_size - 1;
	rfd = lp->rfds + rx_ring_size - 1;
	rfd->v_next = lp->rfds;
	rfd->b_next = WSWAPrfd(virt_to_dma(lp,lp->rfds));
	rfd->cmd = CMD_EOL|CMD_FLEX;

	CHECK_WBACK_INV(lp, sizeof(struct i596_private));
static inline void remove_rx_bufs(struct net_device *dev)
	struct i596_private *lp = (struct i596_private *)dev->priv;
	struct i596_rbd *rbd;

	for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
		if (rbd->skb == NULL)
		dma_unmap_single(lp->dev,
				 (dma_addr_t)WSWAPchar(rbd->b_data),
				 PKT_BUF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb(rbd->skb);
static void rebuild_rx_bufs(struct net_device *dev)
	struct i596_private *lp = (struct i596_private *) dev->priv;

	/* Ensure rx frame/buffer descriptors are tidy */

	for (i = 0; i < rx_ring_size; i++) {
		lp->rfds[i].rbd = I596_NULL;
		lp->rfds[i].cmd = CMD_FLEX;
	lp->rfds[rx_ring_size-1].cmd = CMD_EOL|CMD_FLEX;
	lp->rfd_head = lp->rfds;
	lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
	lp->rbd_head = lp->rbds;
	lp->rfds[0].rbd = WSWAPrbd(virt_to_dma(lp,lp->rbds));

	CHECK_WBACK_INV(lp, sizeof(struct i596_private));
static int init_i596_mem(struct net_device *dev)
	struct i596_private *lp = (struct i596_private *) dev->priv;

	disable_irq(dev->irq);	/* disable IRQs from LAN */
	printk("RESET 82596 port: %08lX (with IRQ%d disabled)\n",
	       dev->base_addr + PA_I82596_RESET,
	gsc_writel(0, (void*)(dev->base_addr + PA_I82596_RESET)); /* Hard Reset */
	udelay(100);		/* Wait 100us - seems to help */

	/* change the scp address */

	lp->last_cmd = jiffies;

	lp->scp.sysbus = 0x0000006c;
	lp->scp.iscp = WSWAPiscp(virt_to_dma(lp,&(lp->iscp)));
	lp->iscp.scb = WSWAPscb(virt_to_dma(lp,&(lp->scb)));
	lp->iscp.stat = ISCP_BUSY;
	lp->scb.cmd = I596_NULL;

	DEB(DEB_INIT,printk("%s: starting i82596.\n", dev->name));

	CHECK_WBACK(&(lp->scp), sizeof(struct i596_scp));
	CHECK_WBACK(&(lp->iscp), sizeof(struct i596_iscp));

	MPU_PORT(dev, PORT_ALTSCP, virt_to_dma(lp,&lp->scp));

	if (wait_istat(dev,lp,1000,"initialization timed out"))
	DEB(DEB_INIT,printk("%s: i82596 initialization successful\n", dev->name));

	/* Ensure rx frame/buffer descriptors are tidy */
	rebuild_rx_bufs(dev);

	CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));

	enable_irq(dev->irq);	/* enable IRQs from LAN */

	DEB(DEB_INIT,printk("%s: queuing CmdConfigure\n", dev->name));
	memcpy(lp->cf_cmd.i596_config, init_setup, 14);
	lp->cf_cmd.cmd.command = CmdConfigure;
	CHECK_WBACK(&(lp->cf_cmd), sizeof(struct cf_cmd));
	i596_add_cmd(dev, &lp->cf_cmd.cmd);

	DEB(DEB_INIT,printk("%s: queuing CmdSASetup\n", dev->name));
	memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
	lp->sa_cmd.cmd.command = CmdSASetup;
	CHECK_WBACK(&(lp->sa_cmd), sizeof(struct sa_cmd));
	i596_add_cmd(dev, &lp->sa_cmd.cmd);

	DEB(DEB_INIT,printk("%s: queuing CmdTDR\n", dev->name));
	lp->tdr_cmd.cmd.command = CmdTDR;
	CHECK_WBACK(&(lp->tdr_cmd), sizeof(struct tdr_cmd));
	i596_add_cmd(dev, &lp->tdr_cmd.cmd);

	spin_lock_irqsave (&lp->lock, flags);

	if (wait_cmd(dev,lp,1000,"timed out waiting to issue RX_START")) {
		spin_unlock_irqrestore (&lp->lock, flags);
	DEB(DEB_INIT,printk("%s: Issuing RX_START\n", dev->name));
	lp->scb.command = RX_START;
	lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
	CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));

	spin_unlock_irqrestore (&lp->lock, flags);

	if (wait_cmd(dev,lp,1000,"RX_START not processed"))
	DEB(DEB_INIT,printk("%s: Receive unit started OK\n", dev->name));

	printk("%s: Failed to initialise 82596\n", dev->name);
	MPU_PORT(dev, PORT_RESET, 0);
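
/* i596_rx() below services completed frames: each rfd whose STAT_C bit is
 * set is processed, re-marked CMD_EOL|CMD_FLEX as the new end of the ring,
 * and the CMD_EOL mark on its predecessor is cleared, so the end-of-list
 * marker simply advances around the ring.
 */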
static inline int i596_rx(struct net_device *dev)
	struct i596_private *lp = (struct i596_private *)dev->priv;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	DEB(DEB_RXFRAME,printk ("i596_rx(), rfd_head %p, rbd_head %p\n",
			lp->rfd_head, lp->rbd_head));

	rfd = lp->rfd_head;		/* Ref next frame to check */

	CHECK_INV(rfd, sizeof(struct i596_rfd));
	while ((rfd->stat) & STAT_C) {	/* Loop while complete frames */
		if (rfd->rbd == I596_NULL)
		else if (rfd->rbd == lp->rbd_head->b_addr) {
			CHECK_INV(rbd, sizeof(struct i596_rbd));
			printk("%s: rbd chain broken!\n", dev->name);

		DEB(DEB_RXFRAME, printk(" rfd %p, rfd.rbd %08x, rfd.stat %04x\n",
			rfd, rfd->rbd, rfd->stat));

		if (rbd != NULL && ((rfd->stat) & STAT_OK)) {
			int pkt_len = rbd->count & 0x3fff;
			struct sk_buff *skb = rbd->skb;

			DEB(DEB_RXADDR,print_eth(rbd->v_data, "received"));

			/* Check if the packet is long enough to just accept
			 * without copying to a properly sized skbuff.
			 */
			if (pkt_len > rx_copybreak) {
				struct sk_buff *newskb;

				dma_unmap_single(lp->dev,(dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, DMA_FROM_DEVICE);
				/* Get fresh skbuff to replace filled one. */
				newskb = dev_alloc_skb(PKT_BUF_SZ + 4);
				if (newskb == NULL) {
					skb = NULL;	/* drop pkt */
				skb_reserve(newskb, 2);

				/* Pass up the skb already on the Rx ring. */
				skb_put(skb, pkt_len);

				dma_addr = dma_map_single(lp->dev, newskb->tail, PKT_BUF_SZ, DMA_FROM_DEVICE);
				rbd->v_data = newskb->tail;
				rbd->b_data = WSWAPchar(dma_addr);
				CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd));

				skb = dev_alloc_skb(pkt_len + 2);
				/* XXX tulip.c can defer packets here!! */
				printk ("%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
				lp->stats.rx_dropped++;

				/* 16 byte align the data fields */
				dma_sync_single(lp->dev, (dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, DMA_FROM_DEVICE);
				memcpy(skb_put(skb,pkt_len), rbd->v_data, pkt_len);

			skb->protocol = eth_type_trans(skb,dev);
			dev->last_rx = jiffies;
			lp->stats.rx_packets++;
			lp->stats.rx_bytes += pkt_len;

			DEB(DEB_ERRORS, printk("%s: Error, rfd.stat = 0x%04x\n",
					dev->name, rfd->stat));
			lp->stats.rx_errors++;
			if ((rfd->stat) & 0x0001)
				lp->stats.collisions++;
			if ((rfd->stat) & 0x0080)
				lp->stats.rx_length_errors++;
			if ((rfd->stat) & 0x0100)
				lp->stats.rx_over_errors++;
			if ((rfd->stat) & 0x0200)
				lp->stats.rx_fifo_errors++;
			if ((rfd->stat) & 0x0400)
				lp->stats.rx_frame_errors++;
			if ((rfd->stat) & 0x0800)
				lp->stats.rx_crc_errors++;
			if ((rfd->stat) & 0x1000)
				lp->stats.rx_length_errors++;

		/* Clear the buffer descriptor count and EOF + F flags */

		if (rbd != NULL && (rbd->count & 0x4000)) {
			lp->rbd_head = rbd->v_next;
			CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd));

		/* Tidy the frame descriptor, marking it as end of list */

		rfd->rbd = I596_NULL;
		rfd->cmd = CMD_EOL|CMD_FLEX;

		/* Remove end-of-list from old end descriptor */

		rfd->v_prev->cmd = CMD_FLEX;

		/* Update record of next frame descriptor to process */

		lp->scb.rfd = rfd->b_next;
		lp->rfd_head = rfd->v_next;
		CHECK_WBACK_INV(rfd->v_prev, sizeof(struct i596_rfd));
		CHECK_WBACK_INV(rfd, sizeof(struct i596_rfd));

		CHECK_INV(rfd, sizeof(struct i596_rfd));

	DEB(DEB_RXFRAME,printk ("frames %d\n", frames));
static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
	struct i596_cmd *ptr;

	while (lp->cmd_head != NULL) {
		lp->cmd_head = ptr->v_next;

		switch ((ptr->command) & 0x7) {
			struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
			struct sk_buff *skb = tx_cmd->skb;
			dma_unmap_single(lp->dev, tx_cmd->dma_addr, skb->len, DMA_TO_DEVICE);

			lp->stats.tx_errors++;
			lp->stats.tx_aborted_errors++;

			ptr->b_next = I596_NULL;
			tx_cmd->cmd.command = 0;	/* Mark as free */

			ptr->b_next = I596_NULL;

		CHECK_WBACK_INV(ptr, sizeof(struct i596_cmd));

	wait_cmd(dev,lp,100,"i596_cleanup_cmd timed out");
	lp->scb.cmd = I596_NULL;
	CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
	DEB(DEB_RESET,printk("i596_reset\n"));

	spin_lock_irqsave (&lp->lock, flags);

	wait_cmd(dev,lp,100,"i596_reset timed out");

	netif_stop_queue(dev);

	/* FIXME: this command might cause an lpmc */
	lp->scb.command = CUC_ABORT | RX_ABORT;
	CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));

	/* wait for shutdown */
	wait_cmd(dev,lp,1000,"i596_reset 2 timed out");
	spin_unlock_irqrestore (&lp->lock, flags);

	i596_cleanup_cmd(dev,lp);

	netif_start_queue(dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
	struct i596_private *lp = (struct i596_private *) dev->priv;

	DEB(DEB_ADDCMD,printk("i596_add_cmd cmd_head %p\n", lp->cmd_head));

	cmd->command |= (CMD_EOL | CMD_INTR);
	cmd->b_next = I596_NULL;
	CHECK_WBACK(cmd, sizeof(struct i596_cmd));

	spin_lock_irqsave (&lp->lock, flags);

	if (lp->cmd_head != NULL) {
		lp->cmd_tail->v_next = cmd;
		lp->cmd_tail->b_next = WSWAPcmd(virt_to_dma(lp,&cmd->status));
		CHECK_WBACK(lp->cmd_tail, sizeof(struct i596_cmd));

		wait_cmd(dev,lp,100,"i596_add_cmd timed out");
		lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&cmd->status));
		lp->scb.command = CUC_START;
		CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));

	spin_unlock_irqrestore (&lp->lock, flags);

	if (lp->cmd_backlog > max_cmd_backlog) {
		unsigned long tickssofar = jiffies - lp->last_cmd;

		if (tickssofar < ticks_limit)

		printk("%s: command unit timed out, status resetting.\n", dev->name);
/* this function makes a perfectly adequate probe... but we have a
 */
static int i596_test(struct net_device *dev)
	struct i596_private *lp = (struct i596_private *) dev->priv;

	tint = (volatile int *)(&(lp->scp));
	data = virt_to_dma(lp,tint);

	CHECK_WBACK(tint, PAGE_SIZE);

	MPU_PORT(dev, 1, data);

	for (data = 1000000; data; data--) {
		CHECK_INV(tint, PAGE_SIZE);

	printk("i596_test result %d\n", tint[1]);
static int i596_open(struct net_device *dev)
	DEB(DEB_OPEN,printk("%s: i596_open() irq %d.\n", dev->name, dev->irq));

	if (request_irq(dev->irq, &i596_interrupt, 0, "i82596", dev)) {
		printk("%s: IRQ %d not free\n", dev->name, dev->irq);

	if (init_i596_mem(dev)) {
		printk("%s: Failed to init memory\n", dev->name);
		goto out_remove_rx_bufs;

	netif_start_queue(dev);

	remove_rx_bufs(dev);
	free_irq(dev->irq, dev);
static void i596_tx_timeout (struct net_device *dev)
	struct i596_private *lp = (struct i596_private *) dev->priv;

	/* Transmitter timeout, serious problems. */
	DEB(DEB_ERRORS,printk("%s: transmit timed out, status resetting.\n",

	lp->stats.tx_errors++;

	/* Try to restart the adaptor */
	if (lp->last_restart == lp->stats.tx_packets) {
		DEB(DEB_ERRORS,printk ("Resetting board.\n"));
		/* Shutdown and restart */
		i596_reset (dev, lp);
		/* Issue a channel attention signal */
		DEB(DEB_ERRORS,printk ("Kicking board.\n"));
		lp->scb.command = CUC_START | RX_START;
		CHECK_WBACK_INV(&(lp->scb), sizeof(struct i596_scb));
		lp->last_restart = lp->stats.tx_packets;

	dev->trans_start = jiffies;
	netif_wake_queue (dev);
static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
	struct i596_private *lp = (struct i596_private *) dev->priv;
	struct tx_cmd *tx_cmd;
	struct i596_tbd *tbd;
	short length = skb->len;
	dev->trans_start = jiffies;

	DEB(DEB_STARTTX,printk("%s: i596_start_xmit(%x,%p) called\n", dev->name,
				skb->len, skb->data));

	if (length < ETH_ZLEN) {
		skb = skb_padto(skb, ETH_ZLEN);

	netif_stop_queue(dev);

	tx_cmd = lp->tx_cmds + lp->next_tx_cmd;
	tbd = lp->tbds + lp->next_tx_cmd;

	if (tx_cmd->cmd.command) {
		DEB(DEB_ERRORS,printk ("%s: xmit ring full, dropping packet.\n",
		lp->stats.tx_dropped++;

		if (++lp->next_tx_cmd == TX_RING_SIZE)
			lp->next_tx_cmd = 0;
		tx_cmd->tbd = WSWAPtbd(virt_to_dma(lp,tbd));
		tbd->next = I596_NULL;

		tx_cmd->cmd.command = CMD_FLEX | CmdTx;

		tbd->size = EOF | length;

		tx_cmd->dma_addr = dma_map_single(lp->dev, skb->data, skb->len,
		tbd->data = WSWAPchar(tx_cmd->dma_addr);

		DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued"));
		CHECK_WBACK_INV(tx_cmd, sizeof(struct tx_cmd));
		CHECK_WBACK_INV(tbd, sizeof(struct i596_tbd));
		i596_add_cmd(dev, &tx_cmd->cmd);

		lp->stats.tx_packets++;
		lp->stats.tx_bytes += length;

	netif_start_queue(dev);
static void print_eth(unsigned char *add, char *str)
	printk("i596 0x%p, ", add);

	for (i = 0; i < 6; i++)
		printk(" %02X", add[i + 6]);

	for (i = 0; i < 6; i++)
		printk(" %02X", add[i]);

	printk(" %02X%02X, %s\n", add[12], add[13], str);
#define LAN_PROM_ADDR	0xF0810000

static int __devinit i82596_probe(struct net_device *dev)
	struct i596_private *lp;
	/* we're going to overwrite dev->priv, so pull the device out */
	struct device *gen_dev = dev->priv;

	dma_addr_t dma_addr;

	/* This lot ensures things have been cache line aligned. */
	if (sizeof(struct i596_rfd) != 32) {
		printk("82596: sizeof(struct i596_rfd) = %d\n",
				sizeof(struct i596_rfd));
	if ((sizeof(struct i596_rbd) % 32) != 0) {
		printk("82596: sizeof(struct i596_rbd) = %d\n",
				sizeof(struct i596_rbd));
	if ((sizeof(struct tx_cmd) % 32) != 0) {
		printk("82596: sizeof(struct tx_cmd) = %d\n",
				sizeof(struct tx_cmd));
	if (sizeof(struct i596_tbd) != 32) {
		printk("82596: sizeof(struct i596_tbd) = %d\n",
				sizeof(struct i596_tbd));
	if (sizeof(struct i596_private) > 4096) {
		printk("82596: sizeof(struct i596_private) = %d\n",
				sizeof(struct i596_private));

	if (!dev->base_addr || !dev->irq)

	if (pdc_lan_station_id(eth_addr, dev->base_addr)) {
		for (i = 0; i < 6; i++) {
			eth_addr[i] = gsc_readb(LAN_PROM_ADDR + i);
		printk("82596.c: MAC of HP700 LAN read from EEPROM\n");

	dev->mem_start = (unsigned long) dma_alloc_noncoherent(gen_dev,
		sizeof(struct i596_private), &dma_addr, GFP_KERNEL);
	if (!dev->mem_start) {
		printk("%s: Couldn't get shared memory\n", dev->name);

	DEB(DEB_PROBE,printk("%s: 82596 at %#3lx,", dev->name, dev->base_addr));

	for (i = 0; i < 6; i++)
		DEB(DEB_PROBE,printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]));

	DEB(DEB_PROBE,printk(" IRQ %d.\n", dev->irq));

	DEB(DEB_PROBE,printk(version));

	/* The 82596-specific entries in the device structure. */
	dev->open = i596_open;
	dev->stop = i596_close;
	dev->hard_start_xmit = i596_start_xmit;
	dev->get_stats = i596_get_stats;
	dev->set_multicast_list = set_multicast_list;
	dev->tx_timeout = i596_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;

	dev->priv = (void *)(dev->mem_start);

	lp = (struct i596_private *) dev->priv;
	DEB(DEB_INIT,printk ("%s: lp at 0x%08lx (%d bytes), lp->scb at 0x%08lx\n",
			dev->name, (unsigned long)lp,
			sizeof(struct i596_private), (unsigned long)&lp->scb));
	memset(lp, 0, sizeof(struct i596_private));

	lp->scb.command = 0;
	lp->scb.cmd = I596_NULL;
	lp->scb.rfd = I596_NULL;
	lp->lock = SPIN_LOCK_UNLOCKED;
	lp->dma_addr = dma_addr;

	CHECK_WBACK_INV(dev->mem_start, sizeof(struct i596_private));
static irqreturn_t i596_interrupt(int irq, void *dev_id, struct pt_regs *regs)
	struct net_device *dev = dev_id;
	struct i596_private *lp;
	unsigned short status, ack_cmd = 0;

	printk("i596_interrupt(): irq %d for unknown device.\n", irq);

	lp = (struct i596_private *) dev->priv;

	spin_lock (&lp->lock);

	wait_cmd(dev,lp,100,"i596 interrupt, timeout");
	status = lp->scb.status;

	DEB(DEB_INTS,printk("%s: i596 interrupt, IRQ %d, status %4.4x.\n",
			dev->name, irq, status));

	ack_cmd = status & 0xf000;

	DEB(DEB_ERRORS, printk("%s: interrupt with no events\n", dev->name));
	spin_unlock (&lp->lock);

	if ((status & 0x8000) || (status & 0x2000)) {
		struct i596_cmd *ptr;

		if ((status & 0x8000))
			DEB(DEB_INTS,printk("%s: i596 interrupt completed command.\n", dev->name));
		if ((status & 0x2000))
			DEB(DEB_INTS,printk("%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));

		while (lp->cmd_head != NULL) {
			CHECK_INV(lp->cmd_head, sizeof(struct i596_cmd));
			if (!(lp->cmd_head->status & STAT_C))

			DEB(DEB_STATUS,printk("cmd_head->status = %04x, ->command = %04x\n",
					lp->cmd_head->status, lp->cmd_head->command));
			lp->cmd_head = ptr->v_next;

			switch ((ptr->command) & 0x7) {
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;

				if ((ptr->status) & STAT_OK) {
					DEB(DEB_TXADDR,print_eth(skb->data, "tx-done"));

					lp->stats.tx_errors++;
					if ((ptr->status) & 0x0020)
						lp->stats.collisions++;
					if (!((ptr->status) & 0x0040))
						lp->stats.tx_heartbeat_errors++;
					if ((ptr->status) & 0x0400)
						lp->stats.tx_carrier_errors++;
					if ((ptr->status) & 0x0800)
						lp->stats.collisions++;
					if ((ptr->status) & 0x1000)
						lp->stats.tx_aborted_errors++;

				dma_unmap_single(lp->dev, tx_cmd->dma_addr, skb->len, DMA_TO_DEVICE);
				dev_kfree_skb_irq(skb);

				tx_cmd->cmd.command = 0; /* Mark free */

				unsigned short status = ((struct tdr_cmd *)ptr)->status;

				if (status & 0x8000) {
					DEB(DEB_ANY,printk("%s: link ok.\n", dev->name));

					if (status & 0x4000)
						printk("%s: Transceiver problem.\n", dev->name);
					if (status & 0x2000)
						printk("%s: Termination problem.\n", dev->name);
					if (status & 0x1000)
						printk("%s: Short circuit.\n", dev->name);

					DEB(DEB_TDR,printk("%s: Time %d.\n", dev->name, status & 0x07ff));

			/* Zap command so set_multicast_list() knows it is free */
			ptr->b_next = I596_NULL;
			CHECK_WBACK(ptr, sizeof(struct i596_cmd));
			lp->last_cmd = jiffies;

		/* This mess is arranging that only the last of any outstanding
		 * commands has the interrupt bit set.  Should probably really
		 * only add to the cmd queue when the CU is stopped.
		 */
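		/* Decoding the mask used below: 0x1fff strips CMD_EOL (0x8000),
		 * CMD_SUSP (0x4000) and CMD_INTR (0x2000) from every queued
		 * command except lp->cmd_tail, which keeps its interrupt and
		 * end-of-list bits.
		 */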
		while ((ptr != NULL) && (ptr != lp->cmd_tail)) {
			struct i596_cmd *prev = ptr;

			ptr->command &= 0x1fff;
			CHECK_WBACK_INV(prev, sizeof(struct i596_cmd));

		if ((lp->cmd_head != NULL))
			ack_cmd |= CUC_START;
		lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&lp->cmd_head->status));
		CHECK_WBACK_INV(&lp->scb, sizeof(struct i596_scb));

	if ((status & 0x1000) || (status & 0x4000)) {
		if ((status & 0x4000))
			DEB(DEB_INTS,printk("%s: i596 interrupt received a frame.\n", dev->name));

		/* Only RX_START if stopped - RGH 07-07-96 */
		if (status & 0x1000) {
			if (netif_running(dev)) {
				DEB(DEB_ERRORS,printk("%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status));
				ack_cmd |= RX_START;
				lp->stats.rx_errors++;
				lp->stats.rx_fifo_errors++;
				rebuild_rx_bufs(dev);

	wait_cmd(dev,lp,100,"i596 interrupt, timeout");
	lp->scb.command = ack_cmd;
	CHECK_WBACK(&lp->scb, sizeof(struct i596_scb));

	/* DANGER: I suspect that some kind of interrupt
	   acknowledgement aside from acking the 82596 might be needed
	   here... but it's running acceptably without */

	wait_cmd(dev,lp,100,"i596 interrupt, exit timeout");
	DEB(DEB_INTS,printk("%s: exiting interrupt.\n", dev->name));

	spin_unlock (&lp->lock);
static int i596_close(struct net_device *dev)
	struct i596_private *lp = (struct i596_private *) dev->priv;
	unsigned long flags;

	netif_stop_queue(dev);

	DEB(DEB_INIT,printk("%s: Shutting down ethercard, status was %4.4x.\n",
			dev->name, lp->scb.status));

	spin_lock_irqsave(&lp->lock, flags);

	wait_cmd(dev,lp,100,"close1 timed out");
	lp->scb.command = CUC_ABORT | RX_ABORT;
	CHECK_WBACK(&lp->scb, sizeof(struct i596_scb));

	wait_cmd(dev,lp,100,"close2 timed out");
	spin_unlock_irqrestore(&lp->lock, flags);
	DEB(DEB_STRUCT,i596_display_data(dev));
	i596_cleanup_cmd(dev,lp);

	disable_irq(dev->irq);

	free_irq(dev->irq, dev);
	remove_rx_bufs(dev);
static struct net_device_stats *
i596_get_stats(struct net_device *dev)
	struct i596_private *lp = (struct i596_private *) dev->priv;
/*
 *	Set or clear the multicast filter for this adaptor.
 */

static void set_multicast_list(struct net_device *dev)
	struct i596_private *lp = (struct i596_private *) dev->priv;
	int config = 0, cnt;

	DEB(DEB_MULTI,printk("%s: set multicast list, %d entries, promisc %s, allmulti %s\n", dev->name, dev->mc_count, dev->flags & IFF_PROMISC ? "ON" : "OFF", dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));

	if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) {
		lp->cf_cmd.i596_config[8] |= 0x01;
	if (!(dev->flags & IFF_PROMISC) && (lp->cf_cmd.i596_config[8] & 0x01)) {
		lp->cf_cmd.i596_config[8] &= ~0x01;
	if ((dev->flags & IFF_ALLMULTI) && (lp->cf_cmd.i596_config[11] & 0x20)) {
		lp->cf_cmd.i596_config[11] &= ~0x20;
	if (!(dev->flags & IFF_ALLMULTI) && !(lp->cf_cmd.i596_config[11] & 0x20)) {
		lp->cf_cmd.i596_config[11] |= 0x20;

		if (lp->cf_cmd.cmd.command)
			printk("%s: config change request already queued\n",

		lp->cf_cmd.cmd.command = CmdConfigure;
		CHECK_WBACK_INV(&lp->cf_cmd, sizeof(struct cf_cmd));
		i596_add_cmd(dev, &lp->cf_cmd.cmd);

	cnt = dev->mc_count;
	if (cnt > MAX_MC_CNT)
		printk("%s: Only %d multicast addresses supported",

	if (dev->mc_count > 0) {
		struct dev_mc_list *dmi;

		cmd->cmd.command = CmdMulticastList;
		cmd->mc_cnt = dev->mc_count * 6;
		for (dmi = dev->mc_list; cnt && dmi != NULL; dmi = dmi->next, cnt--, cp += 6) {
			memcpy(cp, dmi->dmi_addr, 6);
			DEB(DEB_MULTI,printk("%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n",
					dev->name, cp[0],cp[1],cp[2],cp[3],cp[4],cp[5]));
		CHECK_WBACK_INV(&lp->mc_cmd, sizeof(struct mc_cmd));
		i596_add_cmd(dev, &cmd->cmd);
MODULE_PARM(debug, "i");
MODULE_PARM_DESC(debug, "lasi_82596 debug mask");
static int debug = -1;

static int num_drivers;
static struct net_device *netdevs[MAX_DRIVERS];
static int __devinit
lan_init_chip(struct parisc_device *dev)
	struct net_device *netdevice;

	if (num_drivers >= MAX_DRIVERS) {
		/* max count of possible i82596 drivers reached */

		printk(KERN_ERR __FILE__ ": IRQ not found for i82596 at 0x%lx\n", dev->hpa);

	printk(KERN_INFO "Found i82596 at 0x%lx, IRQ %d\n", dev->hpa, dev->irq);

	netdevice = alloc_etherdev(0);

	netdevice->base_addr = dev->hpa;
	netdevice->irq = dev->irq;
	netdevice->init = i82596_probe;
	netdevice->priv = &dev->dev;

	retval = register_netdev(netdevice);
		printk(KERN_WARNING __FILE__ ": register_netdevice ret'd %d\n", retval);

	if (dev->id.sversion == 0x72) {
		((struct i596_private *)netdevice->priv)->options = OPT_SWAP_PORT;

	netdevs[num_drivers++] = netdevice;
static struct parisc_device_id lan_tbl[] = {
	{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008a },
	{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00072 },

MODULE_DEVICE_TABLE(parisc, lan_tbl);

static struct parisc_driver lan_driver = {
	.id_table	= lan_tbl,
	.probe		= lan_init_chip,

static int __devinit lasi_82596_init(void)
	return register_parisc_driver(&lan_driver);

module_init(lasi_82596_init);
static void __exit lasi_82596_exit(void)
	for (i = 0; i < MAX_DRIVERS; i++) {
		struct i596_private *lp;
		struct net_device *netdevice;

		netdevice = netdevs[i];

		unregister_netdev(netdevice);

		lp = (struct i596_private *) netdevice->priv;
		dma_free_noncoherent(lp->dev, sizeof(struct i596_private),
				     (void *)netdevice->mem_start, lp->dma_addr);
		netdevice->priv = NULL;

	unregister_parisc_driver(&lan_driver);

module_exit(lasi_82596_exit);