1 /* $Id: netjet.c,v 1.24.6.1 2000/12/06 16:59:20 kai Exp $
3 * netjet.c low level stuff for Traverse Technologie NETJet ISDN cards
5 * Author Karsten Keil (keil@isdn4linux.de)
7 * Thanks to Traverse Technologie Australia for documents and information
9 * This file is (c) under GNU PUBLIC LICENSE
13 #define __NO_VERSION__
14 #include <linux/init.h>
19 #include <linux/pci.h>
20 #include <linux/interrupt.h>
21 #include <linux/ppp_defs.h>
25 #define bus_to_virt (u_int *)
29 #define virt_to_bus (u_int)
32 const char *NETjet_revision
= "$Revision: 1.24.6.1 $";
34 /* Interface functions */
37 NETjet_ReadIC(struct IsdnCardState
*cs
, u_char offset
)
44 cs
->hw
.njet
.auxd
&= 0xfc;
45 cs
->hw
.njet
.auxd
|= (offset
>>4) & 3;
46 byteout(cs
->hw
.njet
.auxa
, cs
->hw
.njet
.auxd
);
47 ret
= bytein(cs
->hw
.njet
.isac
+ ((offset
& 0xf)<<2));
53 NETjet_WriteIC(struct IsdnCardState
*cs
, u_char offset
, u_char value
)
59 cs
->hw
.njet
.auxd
&= 0xfc;
60 cs
->hw
.njet
.auxd
|= (offset
>>4) & 3;
61 byteout(cs
->hw
.njet
.auxa
, cs
->hw
.njet
.auxd
);
62 byteout(cs
->hw
.njet
.isac
+ ((offset
& 0xf)<<2), value
);
67 NETjet_ReadICfifo(struct IsdnCardState
*cs
, u_char
*data
, int size
)
69 cs
->hw
.njet
.auxd
&= 0xfc;
70 byteout(cs
->hw
.njet
.auxa
, cs
->hw
.njet
.auxd
);
71 insb(cs
->hw
.njet
.isac
, data
, size
);
76 0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
77 0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7,
78 0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e,
79 0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876,
80 0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd,
81 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5,
82 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c,
83 0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974,
84 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
85 0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3,
86 0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a,
87 0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72,
88 0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9,
89 0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1,
90 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738,
91 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70,
92 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7,
93 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
94 0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036,
95 0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e,
96 0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5,
97 0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd,
98 0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134,
99 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c,
100 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3,
101 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb,
102 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
103 0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a,
104 0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1,
105 0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9,
106 0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330,
107 0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78
111 NETjet_WriteICfifo(struct IsdnCardState
*cs
, u_char
*data
, int size
)
113 cs
->hw
.njet
.auxd
&= 0xfc;
114 byteout(cs
->hw
.njet
.auxa
, cs
->hw
.njet
.auxd
);
115 outsb(cs
->hw
.njet
.isac
, data
, size
);
118 void fill_mem(struct BCState
*bcs
, u_int
*pos
, u_int cnt
, int chan
, u_char fill
)
120 u_int mask
=0x000000ff, val
= 0, *p
=pos
;
129 for (i
=0; i
<cnt
; i
++) {
132 if (p
> bcs
->hw
.tiger
.s_end
)
133 p
= bcs
->hw
.tiger
.send
;
138 mode_tiger(struct BCState
*bcs
, int mode
, int bc
)
140 struct IsdnCardState
*cs
= bcs
->cs
;
142 if (cs
->debug
& L1_DEB_HSCX
)
143 debugl1(cs
, "Tiger mode %d bchan %d/%d",
144 mode
, bc
, bcs
->channel
);
149 fill_mem(bcs
, bcs
->hw
.tiger
.send
,
150 NETJET_DMA_TXSIZE
, bc
, 0xff);
151 if (cs
->debug
& L1_DEB_HSCX
)
152 debugl1(cs
, "Tiger stat rec %d/%d send %d",
153 bcs
->hw
.tiger
.r_tot
, bcs
->hw
.tiger
.r_err
,
154 bcs
->hw
.tiger
.s_tot
);
155 if ((cs
->bcs
[0].mode
== L1_MODE_NULL
) &&
156 (cs
->bcs
[1].mode
== L1_MODE_NULL
)) {
157 cs
->hw
.njet
.dmactrl
= 0;
158 byteout(cs
->hw
.njet
.base
+ NETJET_DMACTRL
,
159 cs
->hw
.njet
.dmactrl
);
160 byteout(cs
->hw
.njet
.base
+ NETJET_IRQMASK0
, 0);
163 case (L1_MODE_TRANS
):
165 case (L1_MODE_HDLC_56K
):
167 fill_mem(bcs
, bcs
->hw
.tiger
.send
,
168 NETJET_DMA_TXSIZE
, bc
, 0xff);
169 bcs
->hw
.tiger
.r_state
= HDLC_ZERO_SEARCH
;
170 bcs
->hw
.tiger
.r_tot
= 0;
171 bcs
->hw
.tiger
.r_bitcnt
= 0;
172 bcs
->hw
.tiger
.r_one
= 0;
173 bcs
->hw
.tiger
.r_err
= 0;
174 bcs
->hw
.tiger
.s_tot
= 0;
175 if (! cs
->hw
.njet
.dmactrl
) {
176 fill_mem(bcs
, bcs
->hw
.tiger
.send
,
177 NETJET_DMA_TXSIZE
, !bc
, 0xff);
178 cs
->hw
.njet
.dmactrl
= 1;
179 byteout(cs
->hw
.njet
.base
+ NETJET_DMACTRL
,
180 cs
->hw
.njet
.dmactrl
);
181 byteout(cs
->hw
.njet
.base
+ NETJET_IRQMASK0
, 0x0f);
182 /* was 0x3f now 0x0f for TJ300 and TJ320 GE 13/07/00 */
184 bcs
->hw
.tiger
.sendp
= bcs
->hw
.tiger
.send
;
185 bcs
->hw
.tiger
.free
= NETJET_DMA_TXSIZE
;
186 test_and_set_bit(BC_FLG_EMPTY
, &bcs
->Flag
);
189 if (cs
->debug
& L1_DEB_HSCX
)
190 debugl1(cs
, "tiger: set %x %x %x %x/%x pulse=%d",
191 bytein(cs
->hw
.njet
.base
+ NETJET_DMACTRL
),
192 bytein(cs
->hw
.njet
.base
+ NETJET_IRQMASK0
),
193 bytein(cs
->hw
.njet
.base
+ NETJET_IRQSTAT0
),
194 inl(cs
->hw
.njet
.base
+ NETJET_DMA_READ_ADR
),
195 inl(cs
->hw
.njet
.base
+ NETJET_DMA_WRITE_ADR
),
196 bytein(cs
->hw
.njet
.base
+ NETJET_PULSE_CNT
));
199 static void printframe(struct IsdnCardState
*cs
, u_char
*buf
, int count
, char *s
) {
205 t
+= sprintf(t
, "tiger %s(%4d)", s
, count
);
216 t
+= sprintf(t
, "tiger %s ", s
);
222 #define MAKE_RAW_BYTE for (j=0; j<8; j++) { \
233 bcs->hw.tiger.sendbuf[s_cnt++] = s_val;\
243 bcs->hw.tiger.sendbuf[s_cnt++] = s_val;\
249 static int make_raw_data(struct BCState
*bcs
) {
250 // this make_raw is for 64k
251 register u_int i
,s_cnt
=0;
254 register u_char s_one
= 0;
255 register u_char s_val
= 0;
256 register u_char bitcnt
= 0;
260 debugl1(bcs
->cs
, "tiger make_raw: NULL skb");
263 bcs
->hw
.tiger
.sendbuf
[s_cnt
++] = HDLC_FLAG_VALUE
;
265 for (i
=0; i
<bcs
->tx_skb
->len
; i
++) {
266 val
= bcs
->tx_skb
->data
[i
];
267 fcs
= PPP_FCS (fcs
, val
);
273 val
= (fcs
>>8) & 0xff;
275 val
= HDLC_FLAG_VALUE
;
276 for (j
=0; j
<8; j
++) {
284 bcs
->hw
.tiger
.sendbuf
[s_cnt
++] = s_val
;
289 if (bcs
->cs
->debug
& L1_DEB_HSCX
)
290 debugl1(bcs
->cs
,"tiger make_raw: in %ld out %d.%d",
291 bcs
->tx_skb
->len
, s_cnt
, bitcnt
);
297 bcs
->hw
.tiger
.sendbuf
[s_cnt
++] = s_val
;
298 bcs
->hw
.tiger
.sendbuf
[s_cnt
++] = 0xff; // NJ<->NJ thoughput bug fix
300 bcs
->hw
.tiger
.sendcnt
= s_cnt
;
301 bcs
->tx_cnt
-= bcs
->tx_skb
->len
;
302 bcs
->hw
.tiger
.sp
= bcs
->hw
.tiger
.sendbuf
;
308 #define MAKE_RAW_BYTE_56K for (j=0; j<8; j++) { \
321 bcs->hw.tiger.sendbuf[s_cnt++] = s_val;\
333 bcs->hw.tiger.sendbuf[s_cnt++] = s_val;\
339 static int make_raw_data_56k(struct BCState
*bcs
) {
340 // this make_raw is for 56k
341 register u_int i
,s_cnt
=0;
344 register u_char s_one
= 0;
345 register u_char s_val
= 0;
346 register u_char bitcnt
= 0;
350 debugl1(bcs
->cs
, "tiger make_raw_56k: NULL skb");
353 val
= HDLC_FLAG_VALUE
;
354 for (j
=0; j
<8; j
++) {
364 bcs
->hw
.tiger
.sendbuf
[s_cnt
++] = s_val
;
370 for (i
=0; i
<bcs
->tx_skb
->len
; i
++) {
371 val
= bcs
->tx_skb
->data
[i
];
372 fcs
= PPP_FCS (fcs
, val
);
378 val
= (fcs
>>8) & 0xff;
380 val
= HDLC_FLAG_VALUE
;
381 for (j
=0; j
<8; j
++) {
391 bcs
->hw
.tiger
.sendbuf
[s_cnt
++] = s_val
;
396 if (bcs
->cs
->debug
& L1_DEB_HSCX
)
397 debugl1(bcs
->cs
,"tiger make_raw_56k: in %ld out %d.%d",
398 bcs
->tx_skb
->len
, s_cnt
, bitcnt
);
404 bcs
->hw
.tiger
.sendbuf
[s_cnt
++] = s_val
;
405 bcs
->hw
.tiger
.sendbuf
[s_cnt
++] = 0xff; // NJ<->NJ thoughput bug fix
407 bcs
->hw
.tiger
.sendcnt
= s_cnt
;
408 bcs
->tx_cnt
-= bcs
->tx_skb
->len
;
409 bcs
->hw
.tiger
.sp
= bcs
->hw
.tiger
.sendbuf
;
413 static void got_frame(struct BCState
*bcs
, int count
) {
416 if (!(skb
= dev_alloc_skb(count
)))
417 printk(KERN_WARNING
"TIGER: receive out of memory\n");
419 memcpy(skb_put(skb
, count
), bcs
->hw
.tiger
.rcvbuf
, count
);
420 skb_queue_tail(&bcs
->rqueue
, skb
);
422 bcs
->event
|= 1 << B_RCVBUFREADY
;
423 queue_task(&bcs
->tqueue
, &tq_immediate
);
424 mark_bh(IMMEDIATE_BH
);
426 if (bcs
->cs
->debug
& L1_DEB_RECEIVE_FRAME
)
427 printframe(bcs
->cs
, bcs
->hw
.tiger
.rcvbuf
, count
, "rec");
432 static void read_raw(struct BCState
*bcs
, u_int
*buf
, int cnt
){
436 u_int
*pend
= bcs
->hw
.tiger
.rec
+NETJET_DMA_RXSIZE
-1;
437 register u_char state
= bcs
->hw
.tiger
.r_state
;
438 register u_char r_one
= bcs
->hw
.tiger
.r_one
;
439 register u_char r_val
= bcs
->hw
.tiger
.r_val
;
440 register u_int bitcnt
= bcs
->hw
.tiger
.r_bitcnt
;
445 if (bcs
->mode
== L1_MODE_HDLC
) { // it's 64k
453 for (i
=0;i
<cnt
;i
++) {
454 val
= bcs
->channel
? ((*p
>>8) & 0xff) : (*p
& 0xff);
457 p
= bcs
->hw
.tiger
.rec
;
458 if ((val
& mask
) == mask
) {
459 state
= HDLC_ZERO_SEARCH
;
460 bcs
->hw
.tiger
.r_tot
++;
465 for (j
=0;j
<bits
;j
++) {
466 if (state
== HDLC_ZERO_SEARCH
) {
471 state
= HDLC_FLAG_SEARCH
;
472 if (bcs
->cs
->debug
& L1_DEB_HSCX
)
473 debugl1(bcs
->cs
,"tiger read_raw: zBit(%d,%d,%d) %x",
474 bcs
->hw
.tiger
.r_tot
,i
,j
,val
);
476 } else if (state
== HDLC_FLAG_SEARCH
) {
480 state
=HDLC_ZERO_SEARCH
;
486 state
=HDLC_FLAG_FOUND
;
487 if (bcs
->cs
->debug
& L1_DEB_HSCX
)
488 debugl1(bcs
->cs
,"tiger read_raw: flag(%d,%d,%d) %x",
489 bcs
->hw
.tiger
.r_tot
,i
,j
,val
);
493 } else if (state
== HDLC_FLAG_FOUND
) {
497 state
=HDLC_ZERO_SEARCH
;
510 } else if (r_one
!=5) {
517 if ((state
!= HDLC_ZERO_SEARCH
) &&
519 state
=HDLC_FRAME_FOUND
;
520 bcs
->hw
.tiger
.r_fcs
= PPP_INITFCS
;
521 bcs
->hw
.tiger
.rcvbuf
[0] = r_val
;
522 bcs
->hw
.tiger
.r_fcs
= PPP_FCS (bcs
->hw
.tiger
.r_fcs
, r_val
);
523 if (bcs
->cs
->debug
& L1_DEB_HSCX
)
524 debugl1(bcs
->cs
,"tiger read_raw: byte1(%d,%d,%d) rval %x val %x i %x",
525 bcs
->hw
.tiger
.r_tot
,i
,j
,r_val
,val
,
526 bcs
->cs
->hw
.njet
.irqstat0
);
528 } else if (state
== HDLC_FRAME_FOUND
) {
532 state
=HDLC_ZERO_SEARCH
;
545 debugl1(bcs
->cs
, "tiger: frame not byte aligned");
546 state
=HDLC_FLAG_SEARCH
;
547 bcs
->hw
.tiger
.r_err
++;
548 #ifdef ERROR_STATISTIC
552 if (bcs
->cs
->debug
& L1_DEB_HSCX
)
553 debugl1(bcs
->cs
,"tiger frame end(%d,%d): fcs(%x) i %x",
554 i
,j
,bcs
->hw
.tiger
.r_fcs
, bcs
->cs
->hw
.njet
.irqstat0
);
555 if (bcs
->hw
.tiger
.r_fcs
== PPP_GOODFCS
) {
556 got_frame(bcs
, (bitcnt
>>3)-3);
558 if (bcs
->cs
->debug
) {
559 debugl1(bcs
->cs
, "tiger FCS error");
560 printframe(bcs
->cs
, bcs
->hw
.tiger
.rcvbuf
,
561 (bitcnt
>>3)-1, "rec");
562 bcs
->hw
.tiger
.r_err
++;
564 #ifdef ERROR_STATISTIC
568 state
=HDLC_FLAG_FOUND
;
571 } else if (r_one
==5) {
582 if ((state
== HDLC_FRAME_FOUND
) &&
584 if ((bitcnt
>>3)>=HSCX_BUFMAX
) {
585 debugl1(bcs
->cs
, "tiger: frame too big");
587 state
=HDLC_FLAG_SEARCH
;
588 bcs
->hw
.tiger
.r_err
++;
589 #ifdef ERROR_STATISTIC
593 bcs
->hw
.tiger
.rcvbuf
[(bitcnt
>>3)-1] = r_val
;
594 bcs
->hw
.tiger
.r_fcs
=
595 PPP_FCS (bcs
->hw
.tiger
.r_fcs
, r_val
);
601 bcs
->hw
.tiger
.r_tot
++;
603 bcs
->hw
.tiger
.r_state
= state
;
604 bcs
->hw
.tiger
.r_one
= r_one
;
605 bcs
->hw
.tiger
.r_val
= r_val
;
606 bcs
->hw
.tiger
.r_bitcnt
= bitcnt
;
609 void read_tiger(struct IsdnCardState
*cs
) {
611 int cnt
= NETJET_DMA_RXSIZE
/2;
613 if ((cs
->hw
.njet
.irqstat0
& cs
->hw
.njet
.last_is0
) & NETJET_IRQM0_READ
) {
614 debugl1(cs
,"tiger warn read double dma %x/%x",
615 cs
->hw
.njet
.irqstat0
, cs
->hw
.njet
.last_is0
);
616 #ifdef ERROR_STATISTIC
618 cs
->bcs
[0].err_rdo
++;
620 cs
->bcs
[1].err_rdo
++;
624 cs
->hw
.njet
.last_is0
&= ~NETJET_IRQM0_READ
;
625 cs
->hw
.njet
.last_is0
|= (cs
->hw
.njet
.irqstat0
& NETJET_IRQM0_READ
);
627 if (cs
->hw
.njet
.irqstat0
& NETJET_IRQM0_READ_1
)
628 p
= cs
->bcs
[0].hw
.tiger
.rec
+ NETJET_DMA_RXSIZE
- 1;
630 p
= cs
->bcs
[0].hw
.tiger
.rec
+ cnt
- 1;
631 if ((cs
->bcs
[0].mode
== L1_MODE_HDLC
) || (cs
->bcs
[0].mode
== L1_MODE_HDLC_56K
))
632 read_raw(cs
->bcs
, p
, cnt
);
634 if ((cs
->bcs
[1].mode
== L1_MODE_HDLC
) || (cs
->bcs
[1].mode
== L1_MODE_HDLC_56K
))
635 read_raw(cs
->bcs
+ 1, p
, cnt
);
636 cs
->hw
.njet
.irqstat0
&= ~NETJET_IRQM0_READ
;
639 static void write_raw(struct BCState
*bcs
, u_int
*buf
, int cnt
);
641 void netjet_fill_dma(struct BCState
*bcs
)
643 register u_int
*p
, *sp
;
648 if (bcs
->cs
->debug
& L1_DEB_HSCX
)
649 debugl1(bcs
->cs
,"tiger fill_dma1: c%d %4x", bcs
->channel
,
651 if (test_and_set_bit(BC_FLG_BUSY
, &bcs
->Flag
))
653 if (bcs
->mode
== L1_MODE_HDLC
) { // it's 64k
654 if (make_raw_data(bcs
))
658 if (make_raw_data_56k(bcs
))
661 if (bcs
->cs
->debug
& L1_DEB_HSCX
)
662 debugl1(bcs
->cs
,"tiger fill_dma2: c%d %4x", bcs
->channel
,
664 if (test_and_clear_bit(BC_FLG_NOFRAME
, &bcs
->Flag
)) {
665 write_raw(bcs
, bcs
->hw
.tiger
.sendp
, bcs
->hw
.tiger
.free
);
666 } else if (test_and_clear_bit(BC_FLG_HALF
, &bcs
->Flag
)) {
667 p
= bus_to_virt(inl(bcs
->cs
->hw
.njet
.base
+ NETJET_DMA_READ_ADR
));
668 sp
= bcs
->hw
.tiger
.sendp
;
669 if (p
== bcs
->hw
.tiger
.s_end
)
670 p
= bcs
->hw
.tiger
.send
-1;
671 if (sp
== bcs
->hw
.tiger
.s_end
)
672 sp
= bcs
->hw
.tiger
.send
-1;
675 write_raw(bcs
, bcs
->hw
.tiger
.sendp
, bcs
->hw
.tiger
.free
);
679 if (p
> bcs
->hw
.tiger
.s_end
)
680 p
= bcs
->hw
.tiger
.send
;
683 if (p
> bcs
->hw
.tiger
.s_end
)
684 p
= bcs
->hw
.tiger
.send
;
685 write_raw(bcs
, p
, bcs
->hw
.tiger
.free
- cnt
);
687 } else if (test_and_clear_bit(BC_FLG_EMPTY
, &bcs
->Flag
)) {
688 p
= bus_to_virt(inl(bcs
->cs
->hw
.njet
.base
+ NETJET_DMA_READ_ADR
));
689 cnt
= bcs
->hw
.tiger
.s_end
- p
;
691 p
= bcs
->hw
.tiger
.send
+ 1;
692 cnt
= NETJET_DMA_TXSIZE
/2 - 2;
696 if (cnt
<= (NETJET_DMA_TXSIZE
/2))
697 cnt
+= NETJET_DMA_TXSIZE
/2;
701 write_raw(bcs
, p
, cnt
);
703 if (bcs
->cs
->debug
& L1_DEB_HSCX
)
704 debugl1(bcs
->cs
,"tiger fill_dma3: c%d %4x", bcs
->channel
,
708 static void write_raw(struct BCState
*bcs
, u_int
*buf
, int cnt
) {
709 u_int mask
, val
, *p
=buf
;
714 if (test_bit(BC_FLG_BUSY
, &bcs
->Flag
)) {
715 if (bcs
->hw
.tiger
.sendcnt
> cnt
) {
717 bcs
->hw
.tiger
.sendcnt
-= cnt
;
719 s_cnt
= bcs
->hw
.tiger
.sendcnt
;
720 bcs
->hw
.tiger
.sendcnt
= 0;
726 for (i
=0; i
<s_cnt
; i
++) {
727 val
= bcs
->channel
? ((bcs
->hw
.tiger
.sp
[i
] <<8) & 0xff00) :
728 (bcs
->hw
.tiger
.sp
[i
]);
731 if (p
>bcs
->hw
.tiger
.s_end
)
732 p
= bcs
->hw
.tiger
.send
;
734 bcs
->hw
.tiger
.s_tot
+= s_cnt
;
735 if (bcs
->cs
->debug
& L1_DEB_HSCX
)
736 debugl1(bcs
->cs
,"tiger write_raw: c%d %x-%x %d/%d %d %x", bcs
->channel
,
737 (u_int
)buf
, (u_int
)p
, s_cnt
, cnt
,
738 bcs
->hw
.tiger
.sendcnt
, bcs
->cs
->hw
.njet
.irqstat0
);
739 if (bcs
->cs
->debug
& L1_DEB_HSCX_FIFO
)
740 printframe(bcs
->cs
, bcs
->hw
.tiger
.sp
, s_cnt
, "snd");
741 bcs
->hw
.tiger
.sp
+= s_cnt
;
742 bcs
->hw
.tiger
.sendp
= p
;
743 if (!bcs
->hw
.tiger
.sendcnt
) {
745 debugl1(bcs
->cs
,"tiger write_raw: NULL skb s_cnt %d", s_cnt
);
747 if (bcs
->st
->lli
.l1writewakeup
&&
748 (PACKET_NOACK
!= bcs
->tx_skb
->pkt_type
))
749 bcs
->st
->lli
.l1writewakeup(bcs
->st
, bcs
->tx_skb
->len
);
750 dev_kfree_skb_any(bcs
->tx_skb
);
753 test_and_clear_bit(BC_FLG_BUSY
, &bcs
->Flag
);
754 bcs
->hw
.tiger
.free
= cnt
- s_cnt
;
755 if (bcs
->hw
.tiger
.free
> (NETJET_DMA_TXSIZE
/2))
756 test_and_set_bit(BC_FLG_HALF
, &bcs
->Flag
);
758 test_and_clear_bit(BC_FLG_HALF
, &bcs
->Flag
);
759 test_and_set_bit(BC_FLG_NOFRAME
, &bcs
->Flag
);
761 if ((bcs
->tx_skb
= skb_dequeue(&bcs
->squeue
))) {
762 netjet_fill_dma(bcs
);
766 for (i
=s_cnt
; i
<cnt
;i
++) {
768 if (p
>bcs
->hw
.tiger
.s_end
)
769 p
= bcs
->hw
.tiger
.send
;
771 if (bcs
->cs
->debug
& L1_DEB_HSCX
)
772 debugl1(bcs
->cs
, "tiger write_raw: fill rest %d",
775 bcs
->event
|= 1 << B_XMTBUFREADY
;
776 queue_task(&bcs
->tqueue
, &tq_immediate
);
777 mark_bh(IMMEDIATE_BH
);
780 } else if (test_and_clear_bit(BC_FLG_NOFRAME
, &bcs
->Flag
)) {
781 test_and_set_bit(BC_FLG_HALF
, &bcs
->Flag
);
782 fill_mem(bcs
, buf
, cnt
, bcs
->channel
, 0xff);
783 bcs
->hw
.tiger
.free
+= cnt
;
784 if (bcs
->cs
->debug
& L1_DEB_HSCX
)
785 debugl1(bcs
->cs
,"tiger write_raw: fill half");
786 } else if (test_and_clear_bit(BC_FLG_HALF
, &bcs
->Flag
)) {
787 test_and_set_bit(BC_FLG_EMPTY
, &bcs
->Flag
);
788 fill_mem(bcs
, buf
, cnt
, bcs
->channel
, 0xff);
789 if (bcs
->cs
->debug
& L1_DEB_HSCX
)
790 debugl1(bcs
->cs
,"tiger write_raw: fill full");
794 void write_tiger(struct IsdnCardState
*cs
) {
795 u_int
*p
, cnt
= NETJET_DMA_TXSIZE
/2;
797 if ((cs
->hw
.njet
.irqstat0
& cs
->hw
.njet
.last_is0
) & NETJET_IRQM0_WRITE
) {
798 debugl1(cs
,"tiger warn write double dma %x/%x",
799 cs
->hw
.njet
.irqstat0
, cs
->hw
.njet
.last_is0
);
800 #ifdef ERROR_STATISTIC
808 cs
->hw
.njet
.last_is0
&= ~NETJET_IRQM0_WRITE
;
809 cs
->hw
.njet
.last_is0
|= (cs
->hw
.njet
.irqstat0
& NETJET_IRQM0_WRITE
);
811 if (cs
->hw
.njet
.irqstat0
& NETJET_IRQM0_WRITE_1
)
812 p
= cs
->bcs
[0].hw
.tiger
.send
+ NETJET_DMA_TXSIZE
- 1;
814 p
= cs
->bcs
[0].hw
.tiger
.send
+ cnt
- 1;
815 if ((cs
->bcs
[0].mode
== L1_MODE_HDLC
) || (cs
->bcs
[0].mode
== L1_MODE_HDLC_56K
))
816 write_raw(cs
->bcs
, p
, cnt
);
817 if ((cs
->bcs
[1].mode
== L1_MODE_HDLC
) || (cs
->bcs
[1].mode
== L1_MODE_HDLC_56K
))
818 write_raw(cs
->bcs
+ 1, p
, cnt
);
819 cs
->hw
.njet
.irqstat0
&= ~NETJET_IRQM0_WRITE
;
823 tiger_l2l1(struct PStack
*st
, int pr
, void *arg
)
825 struct sk_buff
*skb
= arg
;
829 case (PH_DATA
| REQUEST
):
832 if (st
->l1
.bcs
->tx_skb
) {
833 skb_queue_tail(&st
->l1
.bcs
->squeue
, skb
);
834 restore_flags(flags
);
836 st
->l1
.bcs
->tx_skb
= skb
;
837 st
->l1
.bcs
->cs
->BC_Send_Data(st
->l1
.bcs
);
838 restore_flags(flags
);
841 case (PH_PULL
| INDICATION
):
842 if (st
->l1
.bcs
->tx_skb
) {
843 printk(KERN_WARNING
"tiger_l2l1: this shouldn't happen\n");
848 st
->l1
.bcs
->tx_skb
= skb
;
849 st
->l1
.bcs
->cs
->BC_Send_Data(st
->l1
.bcs
);
850 restore_flags(flags
);
852 case (PH_PULL
| REQUEST
):
853 if (!st
->l1
.bcs
->tx_skb
) {
854 test_and_clear_bit(FLG_L1_PULL_REQ
, &st
->l1
.Flags
);
855 st
->l1
.l1l2(st
, PH_PULL
| CONFIRM
, NULL
);
857 test_and_set_bit(FLG_L1_PULL_REQ
, &st
->l1
.Flags
);
859 case (PH_ACTIVATE
| REQUEST
):
860 test_and_set_bit(BC_FLG_ACTIV
, &st
->l1
.bcs
->Flag
);
861 mode_tiger(st
->l1
.bcs
, st
->l1
.mode
, st
->l1
.bc
);
862 l1_msg_b(st
, pr
, arg
);
864 case (PH_DEACTIVATE
| REQUEST
):
865 l1_msg_b(st
, pr
, arg
);
867 case (PH_DEACTIVATE
| CONFIRM
):
868 test_and_clear_bit(BC_FLG_ACTIV
, &st
->l1
.bcs
->Flag
);
869 test_and_clear_bit(BC_FLG_BUSY
, &st
->l1
.bcs
->Flag
);
870 mode_tiger(st
->l1
.bcs
, 0, st
->l1
.bc
);
871 st
->l1
.l1l2(st
, PH_DEACTIVATE
| CONFIRM
, NULL
);
878 close_tigerstate(struct BCState
*bcs
)
880 mode_tiger(bcs
, 0, bcs
->channel
);
881 if (test_and_clear_bit(BC_FLG_INIT
, &bcs
->Flag
)) {
882 if (bcs
->hw
.tiger
.rcvbuf
) {
883 kfree(bcs
->hw
.tiger
.rcvbuf
);
884 bcs
->hw
.tiger
.rcvbuf
= NULL
;
886 if (bcs
->hw
.tiger
.sendbuf
) {
887 kfree(bcs
->hw
.tiger
.sendbuf
);
888 bcs
->hw
.tiger
.sendbuf
= NULL
;
890 discard_queue(&bcs
->rqueue
);
891 discard_queue(&bcs
->squeue
);
893 dev_kfree_skb_any(bcs
->tx_skb
);
895 test_and_clear_bit(BC_FLG_BUSY
, &bcs
->Flag
);
901 open_tigerstate(struct IsdnCardState
*cs
, struct BCState
*bcs
)
903 if (!test_and_set_bit(BC_FLG_INIT
, &bcs
->Flag
)) {
904 if (!(bcs
->hw
.tiger
.rcvbuf
= kmalloc(HSCX_BUFMAX
, GFP_ATOMIC
))) {
906 "HiSax: No memory for tiger.rcvbuf\n");
909 if (!(bcs
->hw
.tiger
.sendbuf
= kmalloc(RAW_BUFMAX
, GFP_ATOMIC
))) {
911 "HiSax: No memory for tiger.sendbuf\n");
914 skb_queue_head_init(&bcs
->rqueue
);
915 skb_queue_head_init(&bcs
->squeue
);
918 bcs
->hw
.tiger
.sendcnt
= 0;
919 test_and_clear_bit(BC_FLG_BUSY
, &bcs
->Flag
);
926 setstack_tiger(struct PStack
*st
, struct BCState
*bcs
)
928 bcs
->channel
= st
->l1
.bc
;
929 if (open_tigerstate(st
->l1
.hardware
, bcs
))
932 st
->l2
.l2l1
= tiger_l2l1
;
933 setstack_manager(st
);
941 inittiger(struct IsdnCardState
*cs
)
943 if (!(cs
->bcs
[0].hw
.tiger
.send
= kmalloc(NETJET_DMA_TXSIZE
* sizeof(unsigned int),
944 GFP_KERNEL
| GFP_DMA
))) {
946 "HiSax: No memory for tiger.send\n");
949 cs
->bcs
[0].hw
.tiger
.s_irq
= cs
->bcs
[0].hw
.tiger
.send
+ NETJET_DMA_TXSIZE
/2 - 1;
950 cs
->bcs
[0].hw
.tiger
.s_end
= cs
->bcs
[0].hw
.tiger
.send
+ NETJET_DMA_TXSIZE
- 1;
951 cs
->bcs
[1].hw
.tiger
.send
= cs
->bcs
[0].hw
.tiger
.send
;
952 cs
->bcs
[1].hw
.tiger
.s_irq
= cs
->bcs
[0].hw
.tiger
.s_irq
;
953 cs
->bcs
[1].hw
.tiger
.s_end
= cs
->bcs
[0].hw
.tiger
.s_end
;
955 memset(cs
->bcs
[0].hw
.tiger
.send
, 0xff, NETJET_DMA_TXSIZE
* sizeof(unsigned int));
956 debugl1(cs
, "tiger: send buf %x - %x", (u_int
)cs
->bcs
[0].hw
.tiger
.send
,
957 (u_int
)(cs
->bcs
[0].hw
.tiger
.send
+ NETJET_DMA_TXSIZE
- 1));
958 outl(virt_to_bus(cs
->bcs
[0].hw
.tiger
.send
),
959 cs
->hw
.njet
.base
+ NETJET_DMA_READ_START
);
960 outl(virt_to_bus(cs
->bcs
[0].hw
.tiger
.s_irq
),
961 cs
->hw
.njet
.base
+ NETJET_DMA_READ_IRQ
);
962 outl(virt_to_bus(cs
->bcs
[0].hw
.tiger
.s_end
),
963 cs
->hw
.njet
.base
+ NETJET_DMA_READ_END
);
964 if (!(cs
->bcs
[0].hw
.tiger
.rec
= kmalloc(NETJET_DMA_RXSIZE
* sizeof(unsigned int),
965 GFP_KERNEL
| GFP_DMA
))) {
967 "HiSax: No memory for tiger.rec\n");
970 debugl1(cs
, "tiger: rec buf %x - %x", (u_int
)cs
->bcs
[0].hw
.tiger
.rec
,
971 (u_int
)(cs
->bcs
[0].hw
.tiger
.rec
+ NETJET_DMA_RXSIZE
- 1));
972 cs
->bcs
[1].hw
.tiger
.rec
= cs
->bcs
[0].hw
.tiger
.rec
;
973 memset(cs
->bcs
[0].hw
.tiger
.rec
, 0xff, NETJET_DMA_RXSIZE
* sizeof(unsigned int));
974 outl(virt_to_bus(cs
->bcs
[0].hw
.tiger
.rec
),
975 cs
->hw
.njet
.base
+ NETJET_DMA_WRITE_START
);
976 outl(virt_to_bus(cs
->bcs
[0].hw
.tiger
.rec
+ NETJET_DMA_RXSIZE
/2 - 1),
977 cs
->hw
.njet
.base
+ NETJET_DMA_WRITE_IRQ
);
978 outl(virt_to_bus(cs
->bcs
[0].hw
.tiger
.rec
+ NETJET_DMA_RXSIZE
- 1),
979 cs
->hw
.njet
.base
+ NETJET_DMA_WRITE_END
);
980 debugl1(cs
, "tiger: dmacfg %x/%x pulse=%d",
981 inl(cs
->hw
.njet
.base
+ NETJET_DMA_WRITE_ADR
),
982 inl(cs
->hw
.njet
.base
+ NETJET_DMA_READ_ADR
),
983 bytein(cs
->hw
.njet
.base
+ NETJET_PULSE_CNT
));
984 cs
->hw
.njet
.last_is0
= 0;
985 cs
->bcs
[0].BC_SetStack
= setstack_tiger
;
986 cs
->bcs
[1].BC_SetStack
= setstack_tiger
;
987 cs
->bcs
[0].BC_Close
= close_tigerstate
;
988 cs
->bcs
[1].BC_Close
= close_tigerstate
;
992 releasetiger(struct IsdnCardState
*cs
)
994 if (cs
->bcs
[0].hw
.tiger
.send
) {
995 kfree(cs
->bcs
[0].hw
.tiger
.send
);
996 cs
->bcs
[0].hw
.tiger
.send
= NULL
;
998 if (cs
->bcs
[1].hw
.tiger
.send
) {
999 cs
->bcs
[1].hw
.tiger
.send
= NULL
;
1001 if (cs
->bcs
[0].hw
.tiger
.rec
) {
1002 kfree(cs
->bcs
[0].hw
.tiger
.rec
);
1003 cs
->bcs
[0].hw
.tiger
.rec
= NULL
;
1005 if (cs
->bcs
[1].hw
.tiger
.rec
) {
1006 cs
->bcs
[1].hw
.tiger
.rec
= NULL
;
1011 release_io_netjet(struct IsdnCardState
*cs
)
1013 byteout(cs
->hw
.njet
.base
+ NETJET_IRQMASK0
, 0);
1014 byteout(cs
->hw
.njet
.base
+ NETJET_IRQMASK1
, 0);
1016 release_region(cs
->hw
.njet
.base
, 256);