/*
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *      (c) Copyright 1998 Building Number Three Ltd
 *
 *      Development of this driver was funded by Equiinet Ltd
 *      http://www.equiinet.com
 *
 *      Asynchronous mode dropped for 2.2. For 2.3 we will attempt the
 *      unification of all the Z85x30 asynchronous drivers for real.
 *
 *      To do:
 *      Finish DMA mode support.
 *
 *      Performance:
 *
 *      Z85230
 *      Non DMA you want a 486DX50 or better to do 64Kbits. 9600 baud
 *      X.25 is not unrealistic on all machines. DMA mode can in theory
 *      handle T1/E1 quite nicely. In practice the limit seems to be about
 *      512Kbit->1Mbit depending on motherboard.
 *
 *      Z85C30
 *      64K will take DMA, 9600 baud X.25 should be ok.
 *
 *      Z8530
 *      Synchronous mode without DMA is unlikely to pass about 2400 baud.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/spinlock.h>
#include <net/syncppp.h>
#include "z85230.h"

static spinlock_t z8530_buffer_lock = SPIN_LOCK_UNLOCKED;
/*
 *      Provided port access methods. The Comtrol SV11 requires no delays
 *      between accesses and uses PC I/O. Some drivers may need a 5us delay
 *      between accesses.
 */

extern __inline__ int z8530_read_port(int p)
{
        u8 r = inb(Z8530_PORT_OF(p));
        if (p & Z8530_PORT_SLEEP)       /* gcc should figure this out efficiently! */
                udelay(5);
        return r;
}

extern __inline__ void z8530_write_port(int p, u8 d)
{
        outb(d, Z8530_PORT_OF(p));
        if (p & Z8530_PORT_SLEEP)
                udelay(5);
}
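/*
 * Illustrative sketch only (not part of the original driver): a board whose
 * Z85x30 needs the 5us settle time would pass its I/O addresses with
 * Z8530_PORT_SLEEP or'ed in when it fills in the channel structure, e.g.
 *
 *      board->sync.chanA.ctrlio = Z8530_PORT_SLEEP | (iobase + 1);
 *      board->sync.chanA.dataio = Z8530_PORT_SLEEP | iobase;
 *
 * "board->sync" and the iobase layout are assumed names here; real boards
 * differ. The accessors above then insert a udelay(5) after each access.
 */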
static void z8530_rx_done(struct z8530_channel *c);
static void z8530_tx_done(struct z8530_channel *c);
extern inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
{
        u8 r;

        z8530_write_port(c->ctrlio, reg);
        r = z8530_read_port(c->ctrlio);
        return r;
}

extern inline u8 read_zsdata(struct z8530_channel *c)
{
        u8 r;

        r = z8530_read_port(c->dataio);
        return r;
}

extern inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val)
{
        unsigned long flags;

        save_flags(flags);
        cli();
        z8530_write_port(c->ctrlio, reg);
        z8530_write_port(c->ctrlio, val);
        restore_flags(flags);
}

extern inline void write_zsctrl(struct z8530_channel *c, u8 val)
{
        z8530_write_port(c->ctrlio, val);
}

extern inline void write_zsdata(struct z8530_channel *c, u8 val)
{
        z8530_write_port(c->dataio, val);
}
/*
 *      Register loading parameters for a dead port
 */

u8 z8530_dead_port[]=
{
        255
};

EXPORT_SYMBOL(z8530_dead_port);
/*
 *      Register loading parameters for currently supported circuit types
 */

/*
 *      Data clocked by telco end. This is the correct data for the UK
 *      "kilostream" service, and most other similar services.
 */

u8 z8530_hdlc_kilostream[]=
{
        4,      SYNC_ENAB|SDLC|X1CLK,
        2,      0,      /* No vector */
        3,      ENT_HM|RxCRC_ENAB|Rx8,
        5,      TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
        9,      0,      /* Disable interrupts */
        10,     ABUNDER|NRZ|CRCPS,      /* MARKIDLE ?? */
        15,     DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
        1,      EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
};

EXPORT_SYMBOL(z8530_hdlc_kilostream);
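/*
 * Illustrative use only (not a call made in this file): a card driver
 * normally feeds one of these tables to a channel at open time with
 * z8530_channel_load(), defined later in this file, e.g.
 *
 *      z8530_channel_load(&board->sync.chanA, z8530_hdlc_kilostream);
 *
 * or the _85230 variant below once z8530_init() has detected an enhanced
 * part. "board->sync" is an assumed name for the embedded struct z8530_dev.
 */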
/*
 *      As above but for enhanced chips.
 */

u8 z8530_hdlc_kilostream_85230[]=
{
        4,      SYNC_ENAB|SDLC|X1CLK,
        2,      0,      /* No vector */
        3,      ENT_HM|RxCRC_ENAB|Rx8,
        5,      TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
        9,      0,      /* Disable interrupts */
        10,     ABUNDER|NRZ|CRCPS,      /* MARKIDLE ?? */
        15,     DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
        1,      EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
        23,     3,      /* Extended mode AUTO TX and EOM */
};

EXPORT_SYMBOL(z8530_hdlc_kilostream_85230);
static void z8530_flush_fifo(struct z8530_channel *c)
{
        read_zsreg(c, R1);
        read_zsreg(c, R1);
        read_zsreg(c, R1);
        read_zsreg(c, R1);
        if (c->dev->type == Z85230)
        {
                read_zsreg(c, R1);
                read_zsreg(c, R1);
                read_zsreg(c, R1);
                read_zsreg(c, R1);
        }
}
/*
 *      Sets or clears DTR/RTS on the requested line
 */

static void z8530_rtsdtr(struct z8530_channel *c, int set)
{
        if (set)
                c->regs[5] |= (RTS | DTR);
        else
                c->regs[5] &= ~(RTS | DTR);
        write_zsreg(c, R5, c->regs[5]);
}
/*
 *      Receive handler. This is much like the async one but not quite
 *      the same.
 *
 *      Note: it's intended that this handler can easily be separated from
 *      the main code to run realtime. That'll be needed for some machines
 *      (eg to ever clock 64kbits on a sparc ;)).
 *
 *      The RT_LOCK macros don't do anything now. Keep the code covered
 *      by them as short as possible in all circumstances - clocks cost
 *      baud. The interrupt handler is assumed to be atomic w.r.t.
 *      other code - this is true in the RT case too.
 *
 *      We only cover the sync cases for this. If you want 2Mbit async
 *      do it yourself but consider medical assistance first.
 *
 *      This non DMA synchronous mode is portable code.
 */
static void z8530_rx(struct z8530_channel *c)
{
        u8 ch, stat;

        /* Anything in the receiver? */
        if (!(read_zsreg(c, R0) & 1))
                return;

        ch = read_zsdata(c);
        stat = read_zsreg(c, R1);

        if (c->count < c->max)
        {
                *c->dptr++ = ch;
                c->count++;
        }

        if (stat & (Rx_OVR | CRC_ERR))
        {
                /* Rewind the buffer and return */
                c->dptr = c->skb->data;
                c->count = 0;
                printk(KERN_WARNING "%s: overrun\n", c->dev->name);
                /* printk("crc error\n"); */
        }
        else
        {
                /* Shove the frame upstream */
                z8530_rx_done(c);
                write_zsctrl(c, RES_Rx_CRC);
        }

        /*
         *      Clear the interrupt
         */
        write_zsctrl(c, ERR_RES);
        write_zsctrl(c, RES_H_IUS);
}
/*
 *      Z8530 transmit interrupt handler
 */

static void z8530_tx(struct z8530_channel *c)
{
        while (c->txcount)
        {
                /* Transmit buffer busy? */
                if (!(read_zsreg(c, R0) & 4))
                        return;
                c->txcount--;
                /*
                 *      Shovel out the byte
                 */
                write_zsreg(c, R8, *c->tx_ptr++);
                write_zsctrl(c, RES_H_IUS);
                /* We are about to underflow */
                if (c->txcount == 0)
                {
                        write_zsctrl(c, RES_EOM_L);
                        write_zsreg(c, R10, c->regs[10] & ~ABUNDER);
                }
                return;
        }

        /*
         *      End of frame TX - fire another one
         */
        write_zsctrl(c, RES_Tx_P);
        z8530_tx_done(c);
        /* write_zsreg(c, R8, *c->tx_ptr++); */
        write_zsctrl(c, RES_H_IUS);
}
static void z8530_status(struct z8530_channel *chan)
{
        u8 status = read_zsreg(chan, R0);
        u8 altered = chan->status ^ status;

        /* Transmit underrun */
        /* printk("%s: Tx underrun.\n", chan->dev->name); */
        chan->stats.tx_fifo_errors++;
        write_zsctrl(chan, ERR_RES);

        if (altered & DCD)
        {
                if (status & DCD)
                {
                        printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
                        write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
                        sppp_reopen(chan->netdevice);
                }
                else
                {
                        printk(KERN_INFO "%s: DCD lost\n", chan->dev->name);
                        write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
                        z8530_flush_fifo(chan);
                }
        }
        write_zsctrl(chan, RES_EXT_INT);
        write_zsctrl(chan, RES_H_IUS);
}
struct z8530_irqhandler z8530_sync =
{
        z8530_rx,
        z8530_tx,
        z8530_status
};

EXPORT_SYMBOL(z8530_sync);
/*
 *      Non bus mastering DMA interfaces for the Z8x30 devices. This
 *      is really pretty PC specific.
 */

static void z8530_dma_rx(struct z8530_channel *chan)
{
        u8 status;

        /* Special condition check only */
        read_zsreg(chan, R7);
        read_zsreg(chan, R6);

        status = read_zsreg(chan, R1);
        z8530_rx_done(chan);            /* Fire up the next one */

        write_zsctrl(chan, ERR_RES);
        write_zsctrl(chan, RES_H_IUS);

        /* DMA is off right now, drain the slow way */
}
static void z8530_dma_tx(struct z8530_channel *chan)
{
        if (!chan->dma_tx)
        {
                printk("Hey who turned the DMA off?\n");
                z8530_tx(chan);
                return;
        }
        /* This shouldn't occur in DMA mode */
        printk(KERN_ERR "DMA tx ??\n");
}
static void z8530_dma_status(struct z8530_channel *chan)
{
        unsigned long flags;
        u8 status = read_zsreg(chan, R0);
        u8 altered = chan->status ^ status;

        /* Transmit underrun */
        flags = claim_dma_lock();
        disable_dma(chan->txdma);
        clear_dma_ff(chan->txdma);
        release_dma_lock(flags);

        if (altered & DCD)
        {
                if (status & DCD)
                {
                        printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
                        write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
                        sppp_reopen(chan->netdevice);
                }
                else
                {
                        printk(KERN_INFO "%s: DCD lost\n", chan->dev->name);
                        write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
                        z8530_flush_fifo(chan);
                }
        }
        write_zsctrl(chan, RES_EXT_INT);
        write_zsctrl(chan, RES_H_IUS);
}
struct z8530_irqhandler z8530_dma_sync =
{
        z8530_dma_rx,
        z8530_dma_tx,
        z8530_dma_status
};

EXPORT_SYMBOL(z8530_dma_sync);

struct z8530_irqhandler z8530_txdma_sync =
{
        z8530_rx,               /* PIO receive */
        z8530_dma_tx,           /* DMA transmit */
        z8530_dma_status
};

EXPORT_SYMBOL(z8530_txdma_sync);
/*
 *      Interrupt vectors for a Z8530 that is in 'parked' mode.
 *      For machines with PCI Z85x30 cards, or level triggered interrupts
 *      (eg the MacII) we must clear the interrupt cause or die.
 */

static void z8530_rx_clear(struct z8530_channel *c)
{
        /*
         *      Data and status bytes
         */
        u8 stat;

        read_zsdata(c);
        stat = read_zsreg(c, R1);

        write_zsctrl(c, RES_Rx_CRC);
        write_zsctrl(c, ERR_RES);
        write_zsctrl(c, RES_H_IUS);
}
static void z8530_tx_clear(struct z8530_channel *c)
{
        write_zsctrl(c, RES_Tx_P);
        write_zsctrl(c, RES_H_IUS);
}
static void z8530_status_clear(struct z8530_channel *chan)
{
        u8 status = read_zsreg(chan, R0);

        write_zsctrl(chan, ERR_RES);
        write_zsctrl(chan, RES_EXT_INT);
        write_zsctrl(chan, RES_H_IUS);
}
struct z8530_irqhandler z8530_nop =
{
        z8530_rx_clear,
        z8530_tx_clear,
        z8530_status_clear
};

EXPORT_SYMBOL(z8530_nop);
/*
 *      A Z85[2]30 device has stuck its hand in the air for attention
 */

void z8530_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        struct z8530_dev *dev = dev_id;
        u8 intr;
        static volatile int locker = 0;
        int work = 0;

        if (locker)
        {
                printk(KERN_ERR "IRQ re-enter\n");
                return;
        }
        locker = 1;

        while (++work < 5000)           /* Bound the walk so a jammed chip cannot wedge us */
        {
                struct z8530_irqhandler *irqs = dev->chanA.irqs;

                intr = read_zsreg(&dev->chanA, R3);
                if (!(intr & (CHARxIP | CHATxIP | CHAEXT | CHBRxIP | CHBTxIP | CHBEXT)))
                        break;

                /* This holds the IRQ status. On the 8530 you must read it from chan
                   A even though it applies to the whole chip */

                /* Now walk the chip and see what it is wanting - it may be
                   an IRQ for someone else remember */

                if (intr & (CHARxIP | CHATxIP | CHAEXT))
                {
                        if (intr & CHARxIP)
                                irqs->rx(&dev->chanA);
                        if (intr & CHATxIP)
                                irqs->tx(&dev->chanA);
                        if (intr & CHAEXT)
                                irqs->status(&dev->chanA);
                }

                irqs = dev->chanB.irqs;

                if (intr & (CHBRxIP | CHBTxIP | CHBEXT))
                {
                        if (intr & CHBRxIP)
                                irqs->rx(&dev->chanB);
                        if (intr & CHBTxIP)
                                irqs->tx(&dev->chanB);
                        if (intr & CHBEXT)
                                irqs->status(&dev->chanB);
                }
        }
        if (work == 5000)
                printk(KERN_ERR "%s: interrupt jammed - abort(0x%X)!\n", dev->name, intr);
        locker = 0;
}

EXPORT_SYMBOL(z8530_interrupt);
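/*
 * Illustrative sketch only (not part of this file): a card driver hooks
 * the handler above with the usual request_irq() call, passing its
 * struct z8530_dev as the dev_id cookie so both channels can be found, e.g.
 *
 *      if (request_irq(irq, z8530_interrupt, SA_INTERRUPT,
 *                      "Z85230", &board->sync))
 *              return -EBUSY;
 *
 * "board->sync" is an assumed name for the embedded struct z8530_dev.
 */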
static char reg_init[16]=
int z8530_sync_open(struct device *dev, struct z8530_channel *c)
{
        c->mtu = dev->mtu + 64;
        c->irqs = &z8530_sync;

        /* This loads the double buffer up */
        z8530_rx_done(c);       /* Load the frame ring */
        z8530_rx_done(c);       /* Load the backup frame */

        c->regs[R1] |= TxINT_ENAB;
        write_zsreg(c, R1, c->regs[R1]);
        write_zsreg(c, R3, c->regs[R3] | RxENABLE);
        return 0;
}

EXPORT_SYMBOL(z8530_sync_open);
int z8530_sync_close(struct device *dev, struct z8530_channel *c)
{
        u8 chk;

        c->irqs = &z8530_nop;
        chk = read_zsreg(c, R0);
        write_zsreg(c, R3, c->regs[R3]);
        return 0;
}

EXPORT_SYMBOL(z8530_sync_close);
int z8530_sync_dma_open(struct device *dev, struct z8530_channel *c)
{
        unsigned long flags;

        c->mtu = dev->mtu + 64;

        /*
         *      Load the DMA interfaces up
         */

        /*
         *      Allocate the DMA flip buffers
         */
        c->rx_buf[0] = kmalloc(c->mtu, GFP_KERNEL | GFP_DMA);
        if (c->rx_buf[0] == NULL)
                return -ENOBUFS;
        c->rx_buf[1] = kmalloc(c->mtu, GFP_KERNEL | GFP_DMA);
        if (c->rx_buf[1] == NULL)
                return -ENOBUFS;

        c->tx_dma_buf[0] = kmalloc(c->mtu, GFP_KERNEL | GFP_DMA);
        if (c->tx_dma_buf[0] == NULL)
                return -ENOBUFS;
        c->tx_dma_buf[1] = kmalloc(c->mtu, GFP_KERNEL | GFP_DMA);
        if (c->tx_dma_buf[1] == NULL)
        {
                kfree(c->tx_dma_buf[0]);
                c->tx_dma_buf[0] = NULL;
                return -ENOBUFS;
        }

        /*
         *      Enable DMA control mode
         */
        c->regs[R14] |= DTRREQ;
        write_zsreg(c, R14, c->regs[R14]);

        c->regs[R1] &= ~TxINT_ENAB;
        write_zsreg(c, R1, c->regs[R1]);

        c->regs[R1] |= WT_FN_RDYFN;
        c->regs[R1] |= WT_RDY_RT;
        c->regs[R1] |= INT_ERR_Rx;
        c->regs[R1] &= ~TxINT_ENAB;
        write_zsreg(c, R1, c->regs[R1]);
        c->regs[R1] |= WT_RDY_ENAB;
        write_zsreg(c, R1, c->regs[R1]);

        /*
         *      Set up the DMA configuration
         */
        flags = claim_dma_lock();

        disable_dma(c->rxdma);
        clear_dma_ff(c->rxdma);
        set_dma_mode(c->rxdma, DMA_MODE_READ | 0x10);
        set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
        set_dma_count(c->rxdma, c->mtu);
        enable_dma(c->rxdma);

        disable_dma(c->txdma);
        clear_dma_ff(c->txdma);
        set_dma_mode(c->txdma, DMA_MODE_WRITE);
        disable_dma(c->txdma);

        release_dma_lock(flags);

        /*
         *      Select the DMA interrupt handlers
         */
        c->irqs = &z8530_dma_sync;

        write_zsreg(c, R3, c->regs[R3] | RxENABLE);
        return 0;
}

EXPORT_SYMBOL(z8530_sync_dma_open);
int z8530_sync_dma_close(struct device *dev, struct z8530_channel *c)
{
        unsigned long flags;
        u8 chk;

        c->irqs = &z8530_nop;

        /*
         *      Disable the PC DMA channels
         */
        flags = claim_dma_lock();
        disable_dma(c->rxdma);
        clear_dma_ff(c->rxdma);
        disable_dma(c->txdma);
        clear_dma_ff(c->txdma);
        release_dma_lock(flags);

        /*
         *      Disable DMA control mode
         */
        c->regs[R1] &= ~WT_RDY_ENAB;
        write_zsreg(c, R1, c->regs[R1]);
        c->regs[R1] &= ~(WT_RDY_RT | WT_FN_RDYFN | INT_ERR_Rx);
        c->regs[R1] |= INT_ALL_Rx;
        write_zsreg(c, R1, c->regs[R1]);
        c->regs[R14] &= ~DTRREQ;
        write_zsreg(c, R14, c->regs[R14]);

        kfree(c->tx_dma_buf[0]);
        c->tx_dma_buf[0] = NULL;
        kfree(c->tx_dma_buf[1]);
        c->tx_dma_buf[1] = NULL;

        chk = read_zsreg(c, R0);
        write_zsreg(c, R3, c->regs[R3]);
        return 0;
}

EXPORT_SYMBOL(z8530_sync_dma_close);
int z8530_sync_txdma_open(struct device *dev, struct z8530_channel *c)
{
        unsigned long flags;

        printk("Opening sync interface for TX-DMA\n");
        c->mtu = dev->mtu + 64;

        /*
         *      Load the PIO receive ring
         */

        /*
         *      Load the DMA interfaces up
         */
        c->tx_dma_buf[0] = kmalloc(c->mtu, GFP_KERNEL | GFP_DMA);
        if (c->tx_dma_buf[0] == NULL)
                return -ENOBUFS;

        c->tx_dma_buf[1] = kmalloc(c->mtu, GFP_KERNEL | GFP_DMA);
        if (c->tx_dma_buf[1] == NULL)
        {
                kfree(c->tx_dma_buf[0]);
                c->tx_dma_buf[0] = NULL;
                return -ENOBUFS;
        }

        /*
         *      Enable DMA control mode
         */
        c->regs[R14] |= DTRREQ;
        write_zsreg(c, R14, c->regs[R14]);
        c->regs[R1] &= ~TxINT_ENAB;
        write_zsreg(c, R1, c->regs[R1]);

        /*
         *      Set up the DMA configuration
         */
        flags = claim_dma_lock();

        disable_dma(c->txdma);
        clear_dma_ff(c->txdma);
        set_dma_mode(c->txdma, DMA_MODE_WRITE);
        disable_dma(c->txdma);

        release_dma_lock(flags);

        /*
         *      Select the DMA interrupt handlers
         */
        c->irqs = &z8530_txdma_sync;
        printk("Loading RX\n");
        printk("Rx interrupts ON\n");

        write_zsreg(c, R3, c->regs[R3] | RxENABLE);
        return 0;
}

EXPORT_SYMBOL(z8530_sync_txdma_open);
int z8530_sync_txdma_close(struct device *dev, struct z8530_channel *c)
{
        unsigned long flags;
        u8 chk;

        c->irqs = &z8530_nop;

        /*
         *      Disable the PC DMA channels
         */
        flags = claim_dma_lock();
        disable_dma(c->txdma);
        clear_dma_ff(c->txdma);
        release_dma_lock(flags);

        /*
         *      Disable DMA control mode
         */
        c->regs[R1] &= ~WT_RDY_ENAB;
        write_zsreg(c, R1, c->regs[R1]);
        c->regs[R1] &= ~(WT_RDY_RT | WT_FN_RDYFN | INT_ERR_Rx);
        c->regs[R1] |= INT_ALL_Rx;
        write_zsreg(c, R1, c->regs[R1]);
        c->regs[R14] &= ~DTRREQ;
        write_zsreg(c, R14, c->regs[R14]);

        kfree(c->tx_dma_buf[0]);
        c->tx_dma_buf[0] = NULL;
        kfree(c->tx_dma_buf[1]);
        c->tx_dma_buf[1] = NULL;

        chk = read_zsreg(c, R0);
        write_zsreg(c, R3, c->regs[R3]);
        return 0;
}

EXPORT_SYMBOL(z8530_sync_txdma_close);
/*
 *      Describe a Z8530 in a standard format. We must pass the I/O as
 *      the port offset isn't predictable. The main reason for this function
 *      is to try and get a common format of report.
 */

static char *z8530_type_name[]={
        "Z8530",
        "Z85C30",
        "Z85230"
};

void z8530_describe(struct z8530_dev *dev, char *mapping, int io)
{
        printk(KERN_INFO "%s: %s found at %s 0x%X, IRQ %d.\n",
                dev->name,
                z8530_type_name[dev->type],
                mapping,
                Z8530_PORT_OF(io),
                dev->irq);
}

EXPORT_SYMBOL(z8530_describe);
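/*
 * Illustrative use only: a board driver typically reports itself once it
 * has probed the chip, e.g.
 *
 *      z8530_describe(&board->sync, "I/O", iobase);
 *
 * where "board->sync" and "iobase" are assumed names for the embedded
 * struct z8530_dev and the card's base port.
 */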
/*
 *      Configure up a Z8530
 */

int z8530_init(struct z8530_dev *dev)
{
        /* NOP the interrupt handlers first - we might get a
           floating IRQ transition when we reset the chip */
        dev->chanA.irqs = &z8530_nop;
        dev->chanB.irqs = &z8530_nop;

        /* Reset the chip */
        write_zsreg(&dev->chanA, R9, 0xC0);

        /* Now check it's valid */
        write_zsreg(&dev->chanA, R12, 0xAA);
        if (read_zsreg(&dev->chanA, R12) != 0xAA)
                return -ENODEV;
        write_zsreg(&dev->chanA, R12, 0x55);
        if (read_zsreg(&dev->chanA, R12) != 0x55)
                return -ENODEV;

        /*
         *      See the application note.
         */
        write_zsreg(&dev->chanA, R15, 0x01);

        /*
         *      If we can set the low bit of R15 then
         *      the chip is enhanced.
         */
        if (read_zsreg(&dev->chanA, R15) == 0x01)
        {
                /* This C30 versus 230 detect is from Klaus Kudielka's dmascc */
                /* Put a char in the fifo */
                write_zsreg(&dev->chanA, R8, 0);
                if (read_zsreg(&dev->chanA, R0) & Tx_BUF_EMP)
                        dev->type = Z85230;     /* Has a FIFO */
                else
                        dev->type = Z85C30;     /* Z85C30, 1 byte FIFO */
        }

        /*
         *      The code assumes R7' and friends are
         *      off. Use write_zsext() for these and keep
         *      this bit clear.
         */
        write_zsreg(&dev->chanA, R15, 0);

        /*
         *      At this point it looks like the chip is behaving
         */
        memcpy(dev->chanA.regs, reg_init, 16);
        memcpy(dev->chanB.regs, reg_init, 16);

        return 0;
}

EXPORT_SYMBOL(z8530_init);
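/*
 * Illustrative bring-up order only (not a function from this file): a
 * board driver typically probes and opens a channel roughly as
 *
 *      if (z8530_init(&board->sync))
 *              goto no_chip;
 *      z8530_channel_load(&board->sync.chanA, z8530_hdlc_kilostream);
 *      z8530_sync_dma_open(netdev, &board->sync.chanA);
 *
 * (or z8530_sync_open() for the non-DMA case), with "board->sync" and
 * "netdev" as assumed names for the embedded struct z8530_dev and the
 * struct device being opened.
 */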
int z8530_shutdown(struct z8530_dev *dev)
{
        /* Reset the chip */
        dev->chanA.irqs = &z8530_nop;
        dev->chanB.irqs = &z8530_nop;
        write_zsreg(&dev->chanA, R9, 0xC0);
        return 0;
}

EXPORT_SYMBOL(z8530_shutdown);
/*
 *      Load a Z8530 channel up from the system data.
 *      We use +16 to indicate the 'prime' registers.
 */

int z8530_channel_load(struct z8530_channel *c, u8 *rtable)
{
        while (*rtable != 255)
        {
                int reg = *rtable++;

                if (reg > 0x0F)
                        write_zsreg(c, R15, c->regs[15] | 1);
                write_zsreg(c, reg & 0x0F, *rtable);
                if (reg > 0x0F)
                        write_zsreg(c, R15, c->regs[15] & ~1);
                c->regs[reg] = *rtable++;
        }
        c->rx_function = z8530_null_rx;
        c->tx_next_skb = NULL;
        c->status = 0;  /* Fixme - check DCD now */
        write_zsreg(c, R3, c->regs[R3] | RxENABLE);
        return 0;
}

EXPORT_SYMBOL(z8530_channel_load);
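/*
 * Table format note (illustrative): the rtable is a flat list of
 * register,value byte pairs, ended by the terminator byte used in the
 * loop above. A register number of 16 or more selects the "prime" bank
 * via the R15 low bit, so for example the 23,3 entry in
 * z8530_hdlc_kilostream_85230 above programs R7' (7+16) with the AUTO TX
 * and EOM extensions on the 85230.
 */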
/*
 *      Higher level shovelling - transmit chains
 */

static void z8530_tx_begin(struct z8530_channel *c)
{
        unsigned long flags;

        c->tx_skb = c->tx_next_skb;
        c->tx_next_skb = NULL;
        c->tx_ptr = c->tx_next_ptr;

        if (c->tx_skb == NULL)
        {
                /* Nothing queued: park the transmit DMA channel */
                flags = claim_dma_lock();
                disable_dma(c->txdma);
                /*
                 *      Check if we crapped out.
                 */
                if (get_dma_residue(c->txdma))
                {
                        c->stats.tx_dropped++;
                        c->stats.tx_fifo_errors++;
                }
                release_dma_lock(flags);
                return;
        }

        c->txcount = c->tx_skb->len;

        if (c->dma_tx)
        {
                /*
                 *      FIXME. DMA is broken for the original 8530,
                 *      on the older parts we need to set a flag and
                 *      wait for a further TX interrupt to fire this
                 *      stage off.
                 */
                flags = claim_dma_lock();
                disable_dma(c->txdma);

                /*
                 *      These two are needed by the 8530/85C30
                 *      and must be issued when idling.
                 */
                if (c->dev->type != Z85230)
                {
                        write_zsctrl(c, RES_Tx_CRC);
                        write_zsctrl(c, RES_EOM_L);
                }
                write_zsreg(c, R10, c->regs[10] & ~ABUNDER);
                clear_dma_ff(c->txdma);
                set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
                set_dma_count(c->txdma, c->txcount);
                enable_dma(c->txdma);
                release_dma_lock(flags);
                write_zsctrl(c, RES_EOM_L);
                write_zsreg(c, R5, c->regs[R5] | TxENAB);
        }
        else
        {
                /* PIO transmit: prime as much of the frame as the FIFO takes */
                save_flags(flags);
                cli();
                write_zsreg(c, R10, c->regs[10]);
                write_zsctrl(c, RES_Tx_CRC);
                /* ??? write_zsctrl(c, RES_EOM_L); */
                while (c->txcount && (read_zsreg(c, R0) & Tx_BUF_EMP))
                {
                        write_zsreg(c, R8, *c->tx_ptr++);
                        c->txcount--;
                }
                restore_flags(flags);
        }
}
static void z8530_tx_done(struct z8530_channel *c)
{
        unsigned long flags;
        struct sk_buff *skb;

        spin_lock_irqsave(&z8530_buffer_lock, flags);
        c->netdevice->tbusy = 0;
        /* Actually this can happen. */
        if (c->tx_skb == NULL)
        {
                spin_unlock_irqrestore(&z8530_buffer_lock, flags);
                return;
        }
        skb = c->tx_skb;
        c->tx_skb = NULL;
        spin_unlock_irqrestore(&z8530_buffer_lock, flags);
        c->stats.tx_packets++;
        c->stats.tx_bytes += skb->len;
}
/*
 *      Higher level shovelling - receive chains
 */

void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
{
        /* Nobody has attached a receiver: discard the frame */
        kfree_skb(skb);
}

EXPORT_SYMBOL(z8530_null_rx);
static void z8530_rx_done(struct z8530_channel *c)
{
        struct sk_buff *skb;
        int ct;

        /*
         *      Is our receive engine in DMA mode?
         */
        if (c->rxdma_on)
        {
                /*
                 *      Save the ready state and the buffer currently
                 *      being used as the DMA target.
                 */
                int ready = c->dma_ready;
                unsigned char *rxb = c->rx_buf[c->dma_num];
                unsigned long flags;

                /*
                 *      Complete this DMA. Necessary to find the length.
                 */
                flags = claim_dma_lock();

                disable_dma(c->rxdma);
                clear_dma_ff(c->rxdma);

                ct = c->mtu - get_dma_residue(c->rxdma);
                if (ct < 0)
                        ct = 2;         /* Shit happens.. */

                /*
                 *      Normal case: the other slot is free, start the next DMA
                 *      into it immediately.
                 */
                if (ready)
                {
                        c->dma_num ^= 1;
                        set_dma_mode(c->rxdma, DMA_MODE_READ | 0x10);
                        set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
                        set_dma_count(c->rxdma, c->mtu);
                        enable_dma(c->rxdma);
                        /* Stop any frames that we missed the head of */
                        write_zsreg(c, R0, RES_Rx_CRC);
                }
                else
                {
                        /* Can't occur as we don't reenable the DMA irq until
                           after the flip is done */
                        printk("DMA flip overrun!\n");
                }
                release_dma_lock(flags);

                /*
                 *      Shove the old buffer into an sk_buff. We can't DMA
                 *      directly into one on a PC - it might be above the 16Mb
                 *      boundary. Optimisation - we could check to see if we
                 *      can avoid the copy. Optimisation 2 - make the memcpy
                 *      a copychecksum.
                 */
                skb = dev_alloc_skb(ct);
                if (skb == NULL)
                {
                        c->stats.rx_dropped++;
                        printk(KERN_WARNING "%s: Memory squeeze.\n", c->netdevice->name);
                }
                else
                {
                        skb_put(skb, ct);
                        memcpy(skb->data, rxb, ct);
                        c->stats.rx_packets++;
                        c->stats.rx_bytes += ct;
                }
        }
        else
        {
                /*
                 *      The game we play for non DMA is similar. We want to
                 *      get the controller set up for the next packet as fast
                 *      as possible. We potentially only have one byte + the
                 *      fifo length for this. Thus we want to flip to the new
                 *      buffer and then mess around copying and allocating
                 *      things. For the current case it doesn't matter but
                 *      if you build a system where the sync irq isn't blocked
                 *      by the kernel IRQ disable then you need only block the
                 *      sync IRQ for the RT_LOCK area.
                 */
                ct = c->count;
                skb = c->skb;
                c->skb = c->skb2;
                c->dptr = c->skb->data;

                c->skb2 = dev_alloc_skb(c->mtu);
                if (c->skb2 == NULL)
                        printk(KERN_WARNING "%s: memory squeeze.\n",
                                c->netdevice->name);
                else
                        skb_put(c->skb2, c->mtu);
                c->stats.rx_packets++;
                c->stats.rx_bytes += ct;
        }

        /*
         *      If we received a frame we must now process it.
         */
        if (skb)
        {
                c->rx_function(c, skb);
        }
        else
        {
                c->stats.rx_dropped++;
                printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name);
        }
}
/*
 *      Cannot DMA over a 64K boundary on a PC
 */

extern inline int spans_boundary(struct sk_buff *skb)
{
        unsigned long a = (unsigned long)skb->data;

        a ^= (a + skb->len);
        if (a & 0x00010000)     /* If the 64K bit is different.. */
        {
                printk("spanner\n");
                return 1;
        }
        return 0;
}
/*
 *      Queue a packet for transmission. Because we have rather
 *      hard to hit interrupt latencies for the Z85230 per packet
 *      even in DMA mode we do the flip to DMA buffer if needed here.
 */

int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
{
        unsigned long flags;

        /* PC SPECIFIC - DMA limits */

        /*
         *      If we will DMA the transmit and it's gone over the ISA bus
         *      limit, then copy to the flip buffer
         */
        if (c->dma_tx && ((unsigned long)(virt_to_bus(skb->data + skb->len)) >= 16*1024*1024 || spans_boundary(skb)))
        {
                /*
                 *      Send the flip buffer, and flip the flippy bit.
                 *      We don't care which is used when just so long as
                 *      we never use the same buffer twice in a row. Since
                 *      only one buffer can be going out at a time the other
                 *      is always safe to reuse.
                 */
                c->tx_next_ptr = c->tx_dma_buf[c->tx_dma_used];
                c->tx_dma_used ^= 1;    /* Flip temp buffer */
                memcpy(c->tx_next_ptr, skb->data, skb->len);
        }
        else
                c->tx_next_ptr = skb->data;

        c->tx_next_skb = skb;

        spin_lock_irqsave(&z8530_buffer_lock, flags);
        z8530_tx_begin(c);
        spin_unlock_irqrestore(&z8530_buffer_lock, flags);
        return 0;
}

EXPORT_SYMBOL(z8530_queue_xmit);
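/*
 * Illustrative sketch only: a board driver's hard_start_xmit routine
 * usually just forwards to the helper above, e.g.
 *
 *      static int example_xmit(struct sk_buff *skb, struct device *d)
 *      {
 *              struct example_card *card = d->priv;
 *              return z8530_queue_xmit(&card->sync.chanA, skb);
 *      }
 *
 * "example_xmit", "example_card" and the d->priv layout are placeholder
 * names, not part of this driver.
 */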
struct net_device_stats *z8530_get_stats(struct z8530_channel *c)
{
        return &c->stats;
}

EXPORT_SYMBOL(z8530_get_stats);
int init_module(void)
{
        printk(KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n");
        return 0;
}

void cleanup_module(void)
{
}