/*
 * ni6510 (am7990 'lance' chip) driver for Linux-net-3
 * BETAcode v0.71 (96/09/29) for 2.0.0 (or later)
 * copyrights (c) 1994,1995,1996 by M.Hipp
 *
 * This driver can handle the old ni6510 board and the newer ni6510
 * EtherBlaster. (It probably also works with every fully NE2100-compatible
 * card.)
 *
 * driver probes: io: 0x360,0x300,0x320,0x340 / dma: 3,5,6,7
 *
 * This is an extension to the Linux operating system, and is covered by the
 * same GNU General Public License that covers the Linux kernel.
 *
 * comments/bugs/suggestions can be sent to:
 *    email: hippm@informatik.uni-tuebingen.de
 *
 * sources:
 *    some things are from the 'ni6510-packet-driver for dos by Russ Nelson'
 *    and from the original drivers by D. Becker
 *
 * known problems:
 * - on some PCI boards (including my own) the card/board/ISA-bridge has
 *   problems with bus master DMA. This results in lotsa overruns.
 *   It may help to '#define RCV_PARANOIA_CHECK' or to #undef
 *   the XMT_VIA_SKB and RCV_VIA_SKB options .. this reduces driver performance.
 *   Or just play with your BIOS options to optimize ISA-DMA access.
 *   Maybe you also wanna play with the LOW_PERFORMANCE and MID_PERFORMANCE
 *   defines -> please report your experience then
 * - Harald reported for ASUS SP3G mainboards that you should use
 *   the 'optimal settings' from the user's manual on page 3-12!
 *
 * credits:
 *   thanx to Jason Sullivan for sending me a ni6510 card!
 *   lots of debug runs with ASUS SP3G boards (Intel Saturn) by Harald Koenig
 *
 * simple performance test: (486DX-33/Ni6510-EB receives from 486DX4-100/Ni6510-EB)
 *   average: FTP -> 8384421 bytes received in 8.5 seconds
 *            (no RCV_VIA_SKB, no XMT_VIA_SKB, PARANOIA_CHECK, 4 XMIT BUFS, 8 RCV BUFFS)
 *   peak:    FTP -> 8384421 bytes received in 7.5 seconds
 *            (RCV_VIA_SKB, XMT_VIA_SKB, no PARANOIA_CHECK, 1(!) XMIT BUF, 16 RCV BUFFS)
 */
/*
 * 99.Jun.8: added support for /proc/net/dev byte count for xosview (HK)
 * 96.Sept.29: virt_to_bus stuff added for new memory model
 * 96.April.29: added Harald Koenig's patches (MH)
 * 96.April.13: enhanced error handling .. more tests (MH)
 * 96.April.5/6: a lot of performance tests. Got it stable now (hopefully) (MH)
 * 96.April.1: (no joke ;) .. added EtherBlaster and Module support (MH)
 * 96.Feb.19: fixed a few bugs .. cleanups .. tested for 1.3.66 (MH)
 *            hopefully no more 16MB limit
 * 95.Nov.18: multicast tweaked (AC).
 * 94.Aug.22: changes in xmit_intr (ack more than one xmitted packet), ni65_send_packet (p->lock) (MH)
 * 94.July.16: fixed bugs in recv_skb and skb-alloc stuff (MH)
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/bitops.h>

#include <asm/io.h>
#include <asm/dma.h>

#include "ni65.h"	/* CSR0_*, XMIT_*, RCV_* register/descriptor bits */
/*
 * the current setting allows acceptable performance.
 * for 'RCV_PARANOIA_CHECK' read the 'known problems' part in
 * the header of this file.
 * 'invert' the defines for max. performance. This may cause DMA problems
 * on some boards (e.g. on my ASUS SP3G).
 */
#define XMT_VIA_SKB
#define RCV_VIA_SKB
#define RCV_PARANOIA_CHECK

#define MID_PERFORMANCE

#if   defined( LOW_PERFORMANCE )
 static int isa0=7,isa1=7,csr80=0x0c10;
#elif defined( MID_PERFORMANCE )
 static int isa0=5,isa1=5,csr80=0x2810;
#else	/* high performance */
 static int isa0=4,isa1=4,csr80=0x0017;
#endif
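/*
 * illustrative note (not in the original comments): per the comments in
 * ni65_set_performance() below, isa0/isa1 program the bus-interface DMA
 * read/write timing in units of 50ns and csr80 holds the FIFO watermarks.
 * MID_PERFORMANCE thus means 5*50ns = 250ns DMA timing; the
 * 'high performance' #else branch tightens that to 4*50ns = 200ns.
 */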
/*
 * a few card/vendor specific defines
 */
#define NI65_ID0	0x00
#define NI65_ID1	0x55
#define NI65_EB_ID0	0x52
#define NI65_EB_ID1	0x44
#define NE2100_ID0	0x57
#define NE2100_ID1	0x57

#define PORT p->cmdr_addr
115 * buffer configuration
119 #define RMDNUMMASK 0x80000000
122 #define RMDNUMMASK 0x60000000 /* log2(RMDNUM)<<29 */
127 #define TMDNUMMASK 0x00000000
130 #define TMDNUMMASK 0x40000000 /* log2(TMDNUM)<<29 */
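/*
 * a minimal sketch (illustrative only, not part of the driver) of where the
 * RMDNUMMASK/TMDNUMMASK values come from: the am7990 expects
 * log2(number of ring descriptors) in the top three bits of the init-block
 * ring pointers, which is what the values above hard-code.
 */
#if 0
static u32 ring_len_bits(int ndesc)	/* ndesc must be a power of two */
{
	u32 log2 = 0;

	while (ndesc > 1) {
		ndesc >>= 1;
		log2++;
	}
	return log2 << 29;	/* 16 -> 0x80000000, 8 -> 0x60000000, 4 -> 0x40000000 */
}
#endif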
/* slightly oversized */
#define R_BUF_SIZE 1544
#define T_BUF_SIZE 1544
/*
 * lance register defines
 */
#define L_DATAREG	0x00
#define L_ADDRREG	0x02
#define L_RESET		0x04
#define L_CONFIG	0x05
#define L_BUSIF		0x06

/*
 * to access the lance/am7990 regs, you have to write the
 * reg number into L_ADDRREG, then you can access it using L_DATAREG
 */
#define CSR0	0x00
#define CSR1	0x01
#define CSR2	0x02
#define CSR3	0x03

#define INIT_RING_BEFORE_START	0x1
#define FULL_RESET_ON_ERROR	0x2
#if 0	/* paranoid register access: read back after every port write */
#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);inw(PORT+L_ADDRREG); \
                           outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_ADDRREG),\
                      inw(PORT+L_DATAREG))
#if 0
#define writedatareg(val) {outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
#else
#define writedatareg(val) { writereg(val,CSR0); }
#endif
#else	/* fast variant */
#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);outw(val,PORT+L_DATAREG);}
#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_DATAREG))
#define writedatareg(val) { writereg(val,CSR0); }
#endif
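/*
 * usage sketch (illustrative only): with the macros above, stopping the chip
 * and polling CSR0 for 'initialization done' looks like
 *
 *	writereg(CSR0_STOP, CSR0);
 *	while (!(readreg(CSR0) & CSR0_IDON))
 *		;
 *
 * i.e. every register access is a RAP write (L_ADDRREG) followed by a
 * RDP read/write (L_DATAREG).
 */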
static unsigned char ni_vendor[] = { 0x02,0x07,0x01 };
static struct card {
	unsigned char id0,id1;
	short id_offset;
	short total_size;
	short cmd_offset;
	short addr_offset;
	unsigned char *vendor_id;
	char *cardname;
	unsigned long config;
} cards[] = {
	{
		/* ni6510 (id/offset/size/config values elided in this excerpt) */
		.vendor_id	= ni_vendor,
		.cardname	= "ni6510",
	},
	{
		/* ni6510 EtherBlaster */
		.vendor_id	= ni_vendor,
		.cardname	= "ni6510 EtherBlaster",
	},
	{
		/* generic NE2100: no vendor id to check */
		.cardname	= "generic NE2100",
	},
};
#define NUM_CARDS 3
struct priv {
	struct rmd rmdhead[RMDNUM];
	struct tmd tmdhead[TMDNUM];
	struct init_block ib;
	int rmdnum;
	int tmdnum,tmdlast;
#ifdef RCV_VIA_SKB
	struct sk_buff *recv_skb[RMDNUM];
#else
	void *recvbounce[RMDNUM];
#endif
#ifdef XMT_VIA_SKB
	struct sk_buff *tmd_skb[TMDNUM];
#endif
	void *tmdbounce[TMDNUM];
	int tmdbouncenum;
	int lock,xmit_queued;
	void *self;
	int cmdr_addr;
	int cardno;
	int features;
	spinlock_t ring_lock;
};
static int  ni65_probe1(struct net_device *dev,int);
static irqreturn_t ni65_interrupt(int irq, void * dev_id);
static void ni65_recv_intr(struct net_device *dev,int);
static void ni65_xmit_intr(struct net_device *dev,int);
static int  ni65_open(struct net_device *dev);
static int  ni65_lance_reinit(struct net_device *dev);
static void ni65_init_lance(struct priv *p,unsigned char*,int,int);
static int  ni65_send_packet(struct sk_buff *skb, struct net_device *dev);
static void ni65_timeout(struct net_device *dev);
static int  ni65_close(struct net_device *dev);
static int  ni65_alloc_buffer(struct net_device *dev);
static void ni65_free_buffer(struct priv *p);
static void set_multicast_list(struct net_device *dev);
static int irqtab[] __initdata = { 9,12,15,5 }; /* irq config-translate */
static int dmatab[] __initdata = { 0,3,5,6,7 }; /* dma config-translate and autodetect */

static int debuglevel = 1;
/*
 * set 'performance' registers .. we must STOP lance for that
 */
static void ni65_set_performance(struct priv *p)
{
	writereg(CSR0_STOP | CSR0_CLRALL,CSR0); /* STOP */

	if( !(cards[p->cardno].config & 0x02) )
		return;

	outw(80,PORT+L_ADDRREG);
	if(inw(PORT+L_ADDRREG) != 80)
		return;

	writereg( (csr80 & 0x3fff) ,80); /* FIFO watermarks */
	outw(0,PORT+L_ADDRREG);
	outw((short)isa0,PORT+L_BUSIF); /* write ISA 0: DMA_R : isa0 * 50ns */
	outw(1,PORT+L_ADDRREG);
	outw((short)isa1,PORT+L_BUSIF); /* write ISA 1: DMA_W : isa1 * 50ns */

	outw(CSR0,PORT+L_ADDRREG);	/* switch back to CSR0 */
}
/*
 * open interface (up)
 */
static int ni65_open(struct net_device *dev)
{
	struct priv *p = dev->ml_priv;
	int irqval = request_irq(dev->irq, &ni65_interrupt,0,
	                         cards[p->cardno].cardname,dev);
	if (irqval) {
		printk(KERN_ERR "%s: unable to get IRQ %d (irqval=%d).\n",
		       dev->name,dev->irq, irqval);
		return -EAGAIN;
	}

	if(ni65_lance_reinit(dev))
	{
		netif_start_queue(dev);
		return 0;
	}
	else
	{
		free_irq(dev->irq,dev);
		return -EAGAIN;
	}
}
/*
 * close interface (down)
 */
static int ni65_close(struct net_device *dev)
{
	struct priv *p = dev->ml_priv;

	netif_stop_queue(dev);

	outw(inw(PORT+L_RESET),PORT+L_RESET); /* that's the hard way */

#ifdef XMT_VIA_SKB
	{
		int i;
		for(i=0;i<TMDNUM;i++)
		{
			if(p->tmd_skb[i]) {
				dev_kfree_skb(p->tmd_skb[i]);
				p->tmd_skb[i] = NULL;
			}
		}
	}
#endif
	free_irq(dev->irq,dev);
	return 0;
}
static void cleanup_card(struct net_device *dev)
{
	struct priv *p = dev->ml_priv;
	disable_dma(dev->dma);
	free_dma(dev->dma);
	release_region(dev->base_addr, cards[p->cardno].total_size);
	ni65_free_buffer(p);
}
/* set: io,irq,dma or set it when calling insmod */
static int irq;
static int io;
static int dma;
/*
 * Probe The Card (not the lance-chip)
 */
struct net_device * __init ni65_probe(int unit)
{
	struct net_device *dev = alloc_etherdev(0);
	static int ports[] = {0x360,0x300,0x320,0x340, 0};
	int *port;
	int err = 0;

	if (!dev)
		return ERR_PTR(-ENOMEM);

	if (unit >= 0) {
		sprintf(dev->name, "eth%d", unit);
		netdev_boot_setup_check(dev);
		irq = dev->irq;
		io = dev->base_addr;
		dma = dev->dma;
	} else {
		dev->base_addr = io;
	}

	if (dev->base_addr > 0x1ff) { /* Check a single specified location. */
		err = ni65_probe1(dev, dev->base_addr);
	} else if (dev->base_addr > 0) { /* Don't probe at all. */
		err = -ENXIO;
	} else {
		for (port = ports; *port && ni65_probe1(dev, *port); port++)
			;
		if (!*port)
			err = -ENODEV;
	}
	if (err)
		goto out;

	err = register_netdev(dev);
	if (err)
		goto out1;
	return dev;
out1:
	cleanup_card(dev);
out:
	free_netdev(dev);
	return ERR_PTR(err);
}
static const struct net_device_ops ni65_netdev_ops = {
	.ndo_open		= ni65_open,
	.ndo_stop		= ni65_close,
	.ndo_start_xmit		= ni65_send_packet,
	.ndo_tx_timeout		= ni65_timeout,
	.ndo_set_multicast_list	= set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
/*
 * this is the real card probe ..
 */
static int __init ni65_probe1(struct net_device *dev,int ioaddr)
{
	int i,j;
	struct priv *p;
	unsigned long flags;

	dev->irq = irq;
	dev->dma = dma;

	for(i=0;i<NUM_CARDS;i++) {
		if(!request_region(ioaddr, cards[i].total_size, cards[i].cardname))
			continue;
		if(cards[i].id_offset >= 0) {
			if(inb(ioaddr+cards[i].id_offset+0) != cards[i].id0 ||
			   inb(ioaddr+cards[i].id_offset+1) != cards[i].id1) {
				release_region(ioaddr, cards[i].total_size);
				continue;
			}
		}
		if(cards[i].vendor_id) {
			for(j=0;j<3;j++)
				if(inb(ioaddr+cards[i].addr_offset+j) != cards[i].vendor_id[j]) {
					release_region(ioaddr, cards[i].total_size);
					continue;
				}
		}
		break;
	}
	if(i == NUM_CARDS)
		return -ENODEV;

	for(j=0;j<6;j++)
		dev->dev_addr[j] = inb(ioaddr+cards[i].addr_offset+j);

	if( (j=ni65_alloc_buffer(dev)) < 0) {
		release_region(ioaddr, cards[i].total_size);
		return j;
	}
	p = dev->ml_priv;
	p->cmdr_addr = ioaddr + cards[i].cmd_offset;
	p->cardno = i;
	spin_lock_init(&p->ring_lock);

	printk(KERN_INFO "%s: %s found at %#3x, ", dev->name, cards[p->cardno].cardname , ioaddr);

	outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
	if( (j=readreg(CSR0)) != 0x4) {
		printk("failed.\n");
		printk(KERN_ERR "%s: Can't RESET card: %04x\n", dev->name, j);
		ni65_free_buffer(p);
		release_region(ioaddr, cards[p->cardno].total_size);
		return -EAGAIN;
	}

	outw(88,PORT+L_ADDRREG);
	if(inw(PORT+L_ADDRREG) == 88) {
		unsigned long v;
		v = inw(PORT+L_DATAREG);
		v <<= 16;
		outw(89,PORT+L_ADDRREG);
		v |= inw(PORT+L_DATAREG);
		printk("Version %#08lx, ",v);
		p->features = INIT_RING_BEFORE_START;
	}
	else {
		printk("ancient LANCE, ");
		p->features = 0x0;
	}

	if(test_bit(0,&cards[i].config)) {
		dev->irq = irqtab[(inw(ioaddr+L_CONFIG)>>2)&3];
		dev->dma = dmatab[inw(ioaddr+L_CONFIG)&3];
		printk("IRQ %d (from card), DMA %d (from card).\n",dev->irq,dev->dma);
	}
	else {
		if(dev->dma == 0) {
			/* 'stuck test' from lance.c */
			unsigned long dma_channels =
				((inb(DMA1_STAT_REG) >> 4) & 0x0f)
				| (inb(DMA2_STAT_REG) & 0xf0);
			for(i=1;i<5;i++) {
				int dma = dmatab[i];
				if(test_bit(dma,&dma_channels) || request_dma(dma,"ni6510"))
					continue;

				flags=claim_dma_lock();
				disable_dma(dma);
				set_dma_mode(dma,DMA_MODE_CASCADE);
				enable_dma(dma);
				release_dma_lock(flags);

				ni65_init_lance(p,dev->dev_addr,0,0); /* trigger memory access */

				flags=claim_dma_lock();
				disable_dma(dma);
				free_dma(dma);
				release_dma_lock(flags);

				if(readreg(CSR0) & CSR0_IDON)
					break;
			}
			if(i == 5) {
				printk(KERN_ERR "%s: Can't detect DMA channel!\n", dev->name);
				ni65_free_buffer(p);
				release_region(ioaddr, cards[p->cardno].total_size);
				return -EAGAIN;
			}
			dev->dma = dmatab[i];
			printk("DMA %d (autodetected), ",dev->dma);
		}
		else
			printk("DMA %d (assigned), ",dev->dma);

		if(dev->irq < 2) {
			unsigned long irq_mask;

			ni65_init_lance(p,dev->dev_addr,0,0);
			irq_mask = probe_irq_on();
			writereg(CSR0_INIT|CSR0_INEA,CSR0); /* trigger interrupt */
			msleep(20);
			dev->irq = probe_irq_off(irq_mask);
			if(!dev->irq) {
				printk("Failed to detect IRQ line!\n");
				ni65_free_buffer(p);
				release_region(ioaddr, cards[p->cardno].total_size);
				return -EAGAIN;
			}
			printk("IRQ %d (autodetected).\n",dev->irq);
		}
		else
			printk("IRQ %d (assigned).\n",dev->irq);
	}

	if(request_dma(dev->dma, cards[p->cardno].cardname ) != 0) {
		printk(KERN_ERR "%s: Can't request dma-channel %d\n",dev->name,(int) dev->dma);
		ni65_free_buffer(p);
		release_region(ioaddr, cards[p->cardno].total_size);
		return -EAGAIN;
	}

	dev->base_addr = ioaddr;
	dev->netdev_ops = &ni65_netdev_ops;
	dev->watchdog_timeo = HZ/2;

	return 0; /* everything is OK */
}
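/*
 * sketch (illustrative only) of the am7990 initialization block as
 * ni65_init_lance() below uses it; the real definitions of init_block and
 * of the rmd/tmd ring descriptors are not reproduced in this excerpt:
 */
#if 0
struct init_block {
	u16 mode;	/* chip mode, e.g. M_PROM for promiscuous operation */
	u8  eaddr[6];	/* physical (ethernet) address */
	u8  filter[8];	/* multicast hash filter */
	u32 rrp;	/* receive ring pointer  | log2(RMDNUM)<<29 */
	u32 trp;	/* transmit ring pointer | log2(TMDNUM)<<29 */
};
#endif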
/*
 * set lance register and trigger init
 */
static void ni65_init_lance(struct priv *p,unsigned char *daddr,int filter,int mode)
{
	int i;
	u32 pib;

	writereg(CSR0_CLRALL|CSR0_STOP,CSR0);

	for(i=0;i<6;i++)
		p->ib.eaddr[i] = daddr[i];

	for(i=0;i<8;i++)
		p->ib.filter[i] = filter;
	p->ib.mode = mode;

	p->ib.trp = (u32) isa_virt_to_bus(p->tmdhead) | TMDNUMMASK;
	p->ib.rrp = (u32) isa_virt_to_bus(p->rmdhead) | RMDNUMMASK;
	writereg(0,CSR3);	/* busmaster/no word-swap */
	pib = (u32) isa_virt_to_bus(&p->ib);
	writereg(pib & 0xffff,CSR1);
	writereg(pib >> 16,CSR2);

	writereg(CSR0_INIT,CSR0); /* this changes L_ADDRREG to CSR0 */

	for(i=0;i<32;i++)
	{
		mdelay(4);
		if(inw(PORT+L_DATAREG) & (CSR0_IDON | CSR0_MERR) )
			break; /* init ok ? */
	}
}
/*
 * allocate memory area and check the 16MB border
 */
static void *ni65_alloc_mem(struct net_device *dev,char *what,int size,int type)
{
	struct sk_buff *skb=NULL;
	unsigned char *ptr;
	void *ret;

	if(type) {
		ret = skb = alloc_skb(2+16+size,GFP_KERNEL|GFP_DMA);
		if(!skb) {
			printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what);
			return NULL;
		}
		skb_reserve(skb,2+16);
		skb_put(skb,R_BUF_SIZE); /* grab the whole space .. (not necessary) */
		ptr = skb->data;
	}
	else {
		ret = ptr = kmalloc(T_BUF_SIZE,GFP_KERNEL | GFP_DMA);
		if(!ret) {
			printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what);
			return NULL;
		}
	}
	if( (u32) virt_to_phys(ptr+size) > 0x1000000) { /* 0x1000000 == 16MB, the ISA DMA limit */
		printk(KERN_WARNING "%s: unable to allocate %s memory in lower 16MB!\n",dev->name,what);
		if(type)
			kfree_skb(skb);
		else
			kfree(ptr);
		return NULL;
	}
	return ret;
}
/*
 * allocate all memory structures .. send/recv buffers etc ...
 */
static int ni65_alloc_buffer(struct net_device *dev)
{
	unsigned char *ptr;
	struct priv *p;
	int i;

	/*
	 * we need 8-aligned memory ..
	 */
	ptr = ni65_alloc_mem(dev,"BUFFER",sizeof(struct priv)+8,0);
	if(!ptr)
		return -ENOMEM;

	p = dev->ml_priv = (struct priv *) (((unsigned long) ptr + 7) & ~0x7);
	memset((char *)p, 0, sizeof(struct priv));
	p->self = ptr;	/* remember the unaligned pointer for kfree() */

	for(i=0;i<TMDNUM;i++)
	{
#ifdef XMT_VIA_SKB
		p->tmd_skb[i] = NULL;
#endif
		p->tmdbounce[i] = ni65_alloc_mem(dev,"XMIT",T_BUF_SIZE,0);
		if(!p->tmdbounce[i]) {
			ni65_free_buffer(p);
			return -ENOMEM;
		}
	}

	for(i=0;i<RMDNUM;i++)
	{
#ifdef RCV_VIA_SKB
		p->recv_skb[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,1);
		if(!p->recv_skb[i]) {
			ni65_free_buffer(p);
			return -ENOMEM;
		}
#else
		p->recvbounce[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,0);
		if(!p->recvbounce[i]) {
			ni65_free_buffer(p);
			return -ENOMEM;
		}
#endif
	}

	return 0; /* everything is OK */
}
/*
 * free buffers and private struct
 */
static void ni65_free_buffer(struct priv *p)
{
	int i;

	if(!p)
		return;

	for(i=0;i<TMDNUM;i++) {
		kfree(p->tmdbounce[i]);
#ifdef XMT_VIA_SKB
		if(p->tmd_skb[i])
			dev_kfree_skb(p->tmd_skb[i]);
#endif
	}

	for(i=0;i<RMDNUM;i++)
	{
#ifdef RCV_VIA_SKB
		if(p->recv_skb[i])
			dev_kfree_skb(p->recv_skb[i]);
#else
		kfree(p->recvbounce[i]);
#endif
	}
	kfree(p->self);
}
/*
 * stop and (re)start lance .. e.g after an error
 */
static void ni65_stop_start(struct net_device *dev,struct priv *p)
{
	int csr0 = CSR0_INEA;

	writedatareg(CSR0_STOP);

	if(debuglevel > 1)
		printk(KERN_DEBUG "ni65_stop_start\n");

	if(p->features & INIT_RING_BEFORE_START) {
		int i;
#ifdef XMT_VIA_SKB
		struct sk_buff *skb_save[TMDNUM];
#endif
		unsigned long buffer[TMDNUM];
		short blen[TMDNUM];

		if(p->xmit_queued) {
			while(1) {
				if((p->tmdhead[p->tmdlast].u.s.status & XMIT_OWN))
					break;
				p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
				if(p->tmdlast == p->tmdnum)
					break;
			}
		}

		for(i=0;i<TMDNUM;i++) {
			struct tmd *tmdp = p->tmdhead + i;
#ifdef XMT_VIA_SKB
			skb_save[i] = p->tmd_skb[i];
#endif
			buffer[i] = (u32) isa_bus_to_virt(tmdp->u.buffer);
			blen[i] = tmdp->blen;
			tmdp->u.s.status = 0x0;
		}

		for(i=0;i<RMDNUM;i++) {
			struct rmd *rmdp = p->rmdhead + i;
			rmdp->u.s.status = RCV_OWN;
		}
		p->tmdnum = p->xmit_queued = 0;
		writedatareg(CSR0_STRT | csr0);

		for(i=0;i<TMDNUM;i++) {
			int num = (i + p->tmdlast) & (TMDNUM-1);
			p->tmdhead[i].u.buffer = (u32) isa_virt_to_bus((char *)buffer[num]); /* status is part of buffer field */
			p->tmdhead[i].blen = blen[num];
			if(p->tmdhead[i].u.s.status & XMIT_OWN) {
				p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);
				p->xmit_queued = 1;
				writedatareg(CSR0_TDMD | CSR0_INEA | csr0);
			}
#ifdef XMT_VIA_SKB
			p->tmd_skb[i] = skb_save[num];
#endif
		}
		p->rmdnum = p->tmdlast = 0;
		if(!p->lock)
			if (p->tmdnum || !p->xmit_queued)
				netif_wake_queue(dev);
		dev->trans_start = jiffies; /* prevent tx timeout */
	}
	else
		writedatareg(CSR0_STRT | csr0);
}
/*
 * init lance (write init-values .. init-buffers) (open-helper)
 */
static int ni65_lance_reinit(struct net_device *dev)
{
	int i;
	struct priv *p = dev->ml_priv;
	unsigned long flags;

	p->lock = 0;
	p->xmit_queued = 0;

	flags=claim_dma_lock();
	disable_dma(dev->dma); /* I've never worked with dma, but we do it like the packetdriver */
	set_dma_mode(dev->dma,DMA_MODE_CASCADE);
	enable_dma(dev->dma);
	release_dma_lock(flags);

	outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
	if( (i=readreg(CSR0) ) != 0x4)
	{
		printk(KERN_ERR "%s: can't RESET %s card: %04x\n",dev->name,
		       cards[p->cardno].cardname,(int) i);
		flags=claim_dma_lock();
		disable_dma(dev->dma);
		release_dma_lock(flags);
		return 0;
	}

	p->rmdnum = p->tmdnum = p->tmdlast = p->tmdbouncenum = 0;
	for(i=0;i<TMDNUM;i++)
	{
		struct tmd *tmdp = p->tmdhead + i;
#ifdef XMT_VIA_SKB
		if(p->tmd_skb[i]) {
			dev_kfree_skb(p->tmd_skb[i]);
			p->tmd_skb[i] = NULL;
		}
#endif
		tmdp->u.buffer = 0x0;
		tmdp->u.s.status = XMIT_START | XMIT_END;
		tmdp->blen = tmdp->status2 = 0;
	}

	for(i=0;i<RMDNUM;i++)
	{
		struct rmd *rmdp = p->rmdhead + i;
#ifdef RCV_VIA_SKB
		rmdp->u.buffer = (u32) isa_virt_to_bus(p->recv_skb[i]->data);
#else
		rmdp->u.buffer = (u32) isa_virt_to_bus(p->recvbounce[i]);
#endif
		rmdp->blen = -(R_BUF_SIZE-8);
		rmdp->mlen = 0;
		rmdp->u.s.status = RCV_OWN;
	}

	if(dev->flags & IFF_PROMISC)
		ni65_init_lance(p,dev->dev_addr,0x00,M_PROM);
	else if(dev->mc_count || dev->flags & IFF_ALLMULTI)
		ni65_init_lance(p,dev->dev_addr,0xff,0x0);
	else
		ni65_init_lance(p,dev->dev_addr,0x00,0x00);

	/*
	 * ni65_set_lance_mem() sets L_ADDRREG to CSR0
	 * NOW, WE WILL NEVER CHANGE THE L_ADDRREG, CSR0 IS ALWAYS SELECTED
	 */

	if(inw(PORT+L_DATAREG) & CSR0_IDON) {
		ni65_set_performance(p);
		/* init OK: start lance , enable interrupts */
		writedatareg(CSR0_CLRALL | CSR0_INEA | CSR0_STRT);
		return 1; /* ->OK */
	}
	printk(KERN_ERR "%s: can't init lance, status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
	flags=claim_dma_lock();
	disable_dma(dev->dma);
	release_dma_lock(flags);
	return 0; /* ->Error */
}
/*
 * interrupt handler
 */
static irqreturn_t ni65_interrupt(int irq, void * dev_id)
{
	int csr0 = 0;
	struct net_device *dev = dev_id;
	struct priv *p;
	int bcnt = 32;

	p = dev->ml_priv;

	spin_lock(&p->ring_lock);

	while(--bcnt) {
		csr0 = inw(PORT+L_DATAREG);

#if 0
		writedatareg( (csr0 & CSR0_CLRALL) ); /* ack interrupts, disable int. */
#else
		writedatareg( (csr0 & CSR0_CLRALL) | CSR0_INEA ); /* ack interrupts, interrupts enabled */
#endif

		if(!(csr0 & (CSR0_ERR | CSR0_RINT | CSR0_TINT)))
			break;

		if(csr0 & CSR0_RINT) /* RECV-int? */
			ni65_recv_intr(dev,csr0);
		if(csr0 & CSR0_TINT) /* XMIT-int? */
			ni65_xmit_intr(dev,csr0);

		if(csr0 & CSR0_ERR)
		{
			if(debuglevel > 1)
				printk(KERN_ERR "%s: general error: %04x.\n",dev->name,csr0);
			if(csr0 & CSR0_BABL)
				dev->stats.tx_errors++;
			if(csr0 & CSR0_MISS) {
				int i;
				for(i=0;i<RMDNUM;i++)
					printk("%02x ",p->rmdhead[i].u.s.status);
				printk("\n");
				dev->stats.rx_errors++;
			}
			if(csr0 & CSR0_MERR) {
				if(debuglevel > 1)
					printk(KERN_ERR "%s: Ooops .. memory error: %04x.\n",dev->name,csr0);
				ni65_stop_start(dev,p);
			}
		}
	}

#ifdef RCV_PARANOIA_CHECK
{
	int j;
	for(j=0;j<RMDNUM;j++)
	{
		int i, num1, num2, k;

		/* walk backwards: find the last descriptor already filled for the host */
		for(i=RMDNUM-1;i>0;i--) {
			num2 = (p->rmdnum + i) & (RMDNUM-1);
			if(!(p->rmdhead[num2].u.s.status & RCV_OWN))
				break;
		}

		if(i) {
			/* walk forwards: find the first descriptor filled for the host */
			for(k=0;k<RMDNUM;k++) {
				num1 = (p->rmdnum + k) & (RMDNUM-1);
				if(!(p->rmdhead[num1].u.s.status & RCV_OWN))
					break;
			}
			if(!k)
				break;

			if(debuglevel > 0)
			{
				char buf[256],*buf1;
				buf1 = buf;
				for(k=0;k<RMDNUM;k++) {
					sprintf(buf1,"%02x ",(p->rmdhead[k].u.s.status)); /* & RCV_OWN) ); */
					buf1 += 3;
				}
				*buf1 = 0;
				printk(KERN_ERR "%s: Ooops, receive ring corrupted %2d %2d | %s\n",dev->name,p->rmdnum,i,buf);
			}

			p->rmdnum = num1;
			ni65_recv_intr(dev,csr0);
			if((p->rmdhead[num2].u.s.status & RCV_OWN))
				break; /* ok, we are 'in sync' again */
		}
		else
			break;
	}
}
#endif

	if( (csr0 & (CSR0_RXON | CSR0_TXON)) != (CSR0_RXON | CSR0_TXON) ) {
		printk(KERN_DEBUG "%s: RX or TX was offline -> restart\n",dev->name);
		ni65_stop_start(dev,p);
	}
	else
		writedatareg(CSR0_INEA);

	spin_unlock(&p->ring_lock);
	return IRQ_HANDLED;
}
/*
 * We have received an Xmit-Interrupt ..
 * send a new packet if necessary
 */
static void ni65_xmit_intr(struct net_device *dev,int csr0)
{
	struct priv *p = dev->ml_priv;

	while(p->xmit_queued)
	{
		struct tmd *tmdp = p->tmdhead + p->tmdlast;
		int tmdstat = tmdp->u.s.status;

		if(tmdstat & XMIT_OWN)
			break;

		if(tmdstat & XMIT_ERR)
		{
			if(tmdp->status2 & XMIT_TDRMASK && debuglevel > 3)
				printk(KERN_ERR "%s: tdr-problems (e.g. no resistor)\n",dev->name);

			/* checking some errors */
			if(tmdp->status2 & XMIT_RTRY)
				dev->stats.tx_aborted_errors++;
			if(tmdp->status2 & XMIT_LCAR)
				dev->stats.tx_carrier_errors++;
			if(tmdp->status2 & (XMIT_BUFF | XMIT_UFLO)) {
				/* this stops the xmitter */
				dev->stats.tx_fifo_errors++;
				if(debuglevel > 0)
					printk(KERN_ERR "%s: Xmit FIFO/BUFF error\n",dev->name);
				if(p->features & INIT_RING_BEFORE_START) {
					tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END; /* test: resend this frame */
					ni65_stop_start(dev,p);
					break; /* no more Xmit processing .. */
				}
				else
					ni65_stop_start(dev,p);
			}
			if(debuglevel > 2)
				printk(KERN_ERR "%s: xmit-error: %04x %02x-%04x\n",dev->name,csr0,(int) tmdstat,(int) tmdp->status2);
			if(!(csr0 & CSR0_BABL)) /* don't count errors twice */
				dev->stats.tx_errors++;
			tmdp->status2 = 0;
		}
		else {
			/* blen is stored negated: subtracting adds the byte count */
			dev->stats.tx_bytes -= (short)(tmdp->blen);
			dev->stats.tx_packets++;
		}

#ifdef XMT_VIA_SKB
		if(p->tmd_skb[p->tmdlast]) {
			dev_kfree_skb_irq(p->tmd_skb[p->tmdlast]);
			p->tmd_skb[p->tmdlast] = NULL;
		}
#endif

		p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
		if(p->tmdlast == p->tmdnum)
			p->xmit_queued = 0;
	}
	netif_wake_queue(dev);
}
/*
 * We have received a packet
 */
static void ni65_recv_intr(struct net_device *dev,int csr0)
{
	struct rmd *rmdp;
	int rmdstat,len;
	struct priv *p = dev->ml_priv;

	rmdp = p->rmdhead + p->rmdnum;
	while(!( (rmdstat = rmdp->u.s.status) & RCV_OWN))
	{
		if( (rmdstat & (RCV_START | RCV_END | RCV_ERR)) != (RCV_START | RCV_END) ) /* error or oversized? */
		{
			if(!(rmdstat & RCV_ERR)) {
				if(rmdstat & RCV_START)
				{
					dev->stats.rx_length_errors++;
					printk(KERN_ERR "%s: recv, packet too long: %d\n",dev->name,rmdp->mlen & 0x0fff);
				}
			}
			else {
				if(debuglevel > 2)
					printk(KERN_ERR "%s: receive-error: %04x, lance-status: %04x/%04x\n",
					       dev->name,(int) rmdstat,csr0,(int) inw(PORT+L_DATAREG) );
				if(rmdstat & RCV_FRAM)
					dev->stats.rx_frame_errors++;
				if(rmdstat & RCV_OFLO)
					dev->stats.rx_over_errors++;
				if(rmdstat & RCV_CRC)
					dev->stats.rx_crc_errors++;
				if(rmdstat & RCV_BUF_ERR)
					dev->stats.rx_fifo_errors++;
			}
			if(!(csr0 & CSR0_MISS)) /* don't count errors twice */
				dev->stats.rx_errors++;
		}
		else if( (len = (rmdp->mlen & 0x0fff) - 4) >= 60)
		{
#ifdef RCV_VIA_SKB
			struct sk_buff *skb = alloc_skb(R_BUF_SIZE+2+16,GFP_ATOMIC);
			if (skb)
				skb_reserve(skb,16);
#else
			struct sk_buff *skb = dev_alloc_skb(len+2);
#endif
			if(skb)
			{
				skb_reserve(skb,2);
#ifdef RCV_VIA_SKB
				if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) {
					/* new skb is above 16MB: keep the old ring skb, copy out */
					skb_put(skb,len);
					skb_copy_to_linear_data(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len);
				}
				else {
					/* swap the fresh skb into the ring, hand the filled one up */
					struct sk_buff *skb1 = p->recv_skb[p->rmdnum];
					skb_put(skb,R_BUF_SIZE);
					p->recv_skb[p->rmdnum] = skb;
					rmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
					skb = skb1;
					skb_trim(skb,len);
				}
#else
				skb_put(skb,len);
				skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len);
#endif
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += len;
				skb->protocol=eth_type_trans(skb,dev);
				netif_rx(skb);
			}
			else
			{
				printk(KERN_ERR "%s: can't alloc new sk_buff\n",dev->name);
				dev->stats.rx_dropped++;
			}
		}
		else {
			printk(KERN_INFO "%s: received runt packet\n",dev->name);
			dev->stats.rx_errors++;
		}
		rmdp->blen = -(R_BUF_SIZE-8);
		rmdp->mlen = 0;
		rmdp->u.s.status = RCV_OWN; /* change owner */
		p->rmdnum = (p->rmdnum + 1) & (RMDNUM-1);
		rmdp = p->rmdhead + p->rmdnum;
	}
}
static void ni65_timeout(struct net_device *dev)
{
	int i;
	struct priv *p = dev->ml_priv;

	printk(KERN_ERR "%s: xmitter timed out, try to restart!\n",dev->name);
	for(i=0;i<TMDNUM;i++)
		printk("%02x ",p->tmdhead[i].u.s.status);
	printk("\n");
	ni65_lance_reinit(dev);
	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue(dev);
}
static int ni65_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct priv *p = dev->ml_priv;

	netif_stop_queue(dev);

	if (test_and_set_bit(0, (void*)&p->lock)) {
		printk(KERN_ERR "%s: Queue was locked.\n", dev->name);
		return NETDEV_TX_BUSY;
	}

	{
		short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
		struct tmd *tmdp;
		unsigned long flags;

#ifdef XMT_VIA_SKB
		if( (unsigned long) (skb->data + skb->len) > 0x1000000) {
#endif
			/* buffer above 16MB: copy into a bounce buffer the LANCE can reach */
			skb_copy_from_linear_data(skb, p->tmdbounce[p->tmdbouncenum],
			                          skb->len > T_BUF_SIZE ? T_BUF_SIZE : skb->len);
			if (len > skb->len)
				memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len);
			dev_kfree_skb (skb);

			spin_lock_irqsave(&p->ring_lock, flags);
			tmdp = p->tmdhead + p->tmdnum;
			tmdp->u.buffer = (u32) isa_virt_to_bus(p->tmdbounce[p->tmdbouncenum]);
			p->tmdbouncenum = (p->tmdbouncenum + 1) & (TMDNUM - 1);

#ifdef XMT_VIA_SKB
		}
		else {
			spin_lock_irqsave(&p->ring_lock, flags);

			tmdp = p->tmdhead + p->tmdnum;
			tmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
			p->tmd_skb[p->tmdnum] = skb;
		}
#endif
		tmdp->blen = -len;

		tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END;
		writedatareg(CSR0_TDMD | CSR0_INEA); /* enable xmit & interrupt */

		p->xmit_queued = 1;
		p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);

		if(p->tmdnum != p->tmdlast)
			netif_wake_queue(dev);

		p->lock = 0;
		dev->trans_start = jiffies;

		spin_unlock_irqrestore(&p->ring_lock, flags);
	}

	return NETDEV_TX_OK;
}
static void set_multicast_list(struct net_device *dev)
{
	if(!ni65_lance_reinit(dev))
		printk(KERN_ERR "%s: Can't switch card into MC mode!\n",dev->name);
	netif_wake_queue(dev);
}
#ifdef MODULE
static struct net_device *dev_ni65;

module_param(irq, int, 0);
module_param(io, int, 0);
module_param(dma, int, 0);
MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)");
MODULE_PARM_DESC(io, "ni6510 I/O base address");
MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)");
int __init init_module(void)
{
	dev_ni65 = ni65_probe(-1);
	return IS_ERR(dev_ni65) ? PTR_ERR(dev_ni65) : 0;
}

void __exit cleanup_module(void)
{
	unregister_netdev(dev_ni65);
	cleanup_card(dev_ni65);
	free_netdev(dev_ni65);
}
#endif /* MODULE */

MODULE_LICENSE("GPL");